From 6f4d31204638b4bce9344ee962b55dd949a058a7 Mon Sep 17 00:00:00 2001 From: jmeridth Date: Thu, 14 Dec 2023 12:16:20 -0600 Subject: [PATCH] chore: Update GitHub pages to redirect to readthedocs Relates to #12360 ### Motivation Change each github page to utilize meta tag to redirect to new location on readthedocs Since we don't host our previous documentation we can't do a real HTTP redirect. Meta tag is the next best thing https://www.w3.org/TR/WCAG20-TECHS/H76.html ### Modifications - [x] setup redirects to [readthedocs](https://argo-workflows.readthedocs.io/en/latest/) + previus page suffix - [x] use base styling for pages - Anton Signed-off-by: jmeridth Co-authored-by: Anton Gilgur --- 404.html | 3835 +-- CONTRIBUTING/index.html | 4188 +-- access-token/index.html | 4181 +-- architecture/index.html | 4043 +-- argo-server-auth-mode/index.html | 3942 +-- argo-server-sso-argocd/index.html | 4127 +-- argo-server-sso/index.html | 4276 +-- argo-server/index.html | 4351 +-- artifact-repository-ref/index.html | 3970 +-- artifact-visualization/index.html | 4151 +-- assets/javascripts/bundle.960e086b.min.js | 29 - assets/javascripts/bundle.960e086b.min.js.map | 8 - assets/javascripts/lunr/min/lunr.ar.min.js | 1 - assets/javascripts/lunr/min/lunr.da.min.js | 18 - assets/javascripts/lunr/min/lunr.de.min.js | 18 - assets/javascripts/lunr/min/lunr.du.min.js | 18 - assets/javascripts/lunr/min/lunr.es.min.js | 18 - assets/javascripts/lunr/min/lunr.fi.min.js | 18 - assets/javascripts/lunr/min/lunr.fr.min.js | 18 - assets/javascripts/lunr/min/lunr.hi.min.js | 1 - assets/javascripts/lunr/min/lunr.hu.min.js | 18 - assets/javascripts/lunr/min/lunr.it.min.js | 18 - assets/javascripts/lunr/min/lunr.ja.min.js | 1 - assets/javascripts/lunr/min/lunr.jp.min.js | 1 - assets/javascripts/lunr/min/lunr.multi.min.js | 1 - assets/javascripts/lunr/min/lunr.nl.min.js | 18 - assets/javascripts/lunr/min/lunr.no.min.js | 18 - assets/javascripts/lunr/min/lunr.pt.min.js | 18 - 
assets/javascripts/lunr/min/lunr.ro.min.js | 18 - assets/javascripts/lunr/min/lunr.ru.min.js | 18 - .../lunr/min/lunr.stemmer.support.min.js | 1 - assets/javascripts/lunr/min/lunr.sv.min.js | 18 - assets/javascripts/lunr/min/lunr.th.min.js | 1 - assets/javascripts/lunr/min/lunr.tr.min.js | 18 - assets/javascripts/lunr/min/lunr.vi.min.js | 1 - assets/javascripts/lunr/min/lunr.zh.min.js | 1 - assets/javascripts/lunr/tinyseg.js | 206 - assets/javascripts/lunr/wordcut.js | 6708 ---- .../workers/search.22074ed6.min.js | 48 - .../workers/search.22074ed6.min.js.map | 8 - assets/stylesheets/palette.e6a45f82.min.css | 1 - .../stylesheets/palette.e6a45f82.min.css.map | 1 - async-pattern/index.html | 4090 +-- cli/argo/index.html | 4192 +-- cli/argo_archive/index.html | 4076 +-- cli/argo_archive_delete/index.html | 4069 +-- cli/argo_archive_get/index.html | 4070 +-- cli/argo_archive_list-label-keys/index.html | 4069 +-- cli/argo_archive_list-label-values/index.html | 4070 +-- cli/argo_archive_list/index.html | 4072 +-- cli/argo_archive_resubmit/index.html | 4121 +-- cli/argo_archive_retry/index.html | 4121 +-- cli/argo_auth/index.html | 4070 +-- cli/argo_auth_token/index.html | 4069 +-- cli/argo_cluster-template/index.html | 4074 +-- cli/argo_cluster-template_create/index.html | 4095 +-- cli/argo_cluster-template_delete/index.html | 4070 +-- cli/argo_cluster-template_get/index.html | 4070 +-- cli/argo_cluster-template_lint/index.html | 4071 +-- cli/argo_cluster-template_list/index.html | 4094 +-- cli/argo_completion/index.html | 4091 +-- cli/argo_cp/index.html | 4096 +-- cli/argo_cron/index.html | 4092 +-- cli/argo_cron_create/index.html | 4079 +-- cli/argo_cron_delete/index.html | 4070 +-- cli/argo_cron_get/index.html | 4070 +-- cli/argo_cron_lint/index.html | 4071 +-- cli/argo_cron_list/index.html | 4072 +-- cli/argo_cron_resume/index.html | 4069 +-- cli/argo_cron_suspend/index.html | 4069 +-- cli/argo_delete/index.html | 4104 +-- cli/argo_executor-plugin/index.html | 4070 
+-- cli/argo_executor-plugin_build/index.html | 4069 +-- cli/argo_get/index.html | 4096 +-- cli/argo_lint/index.html | 4096 +-- cli/argo_list/index.html | 4124 +-- cli/argo_logs/index.html | 4121 +-- cli/argo_node/index.html | 4096 +-- cli/argo_resubmit/index.html | 4141 +-- cli/argo_resume/index.html | 4101 +-- cli/argo_retry/index.html | 4144 +-- cli/argo_server/index.html | 4105 +-- cli/argo_stop/index.html | 4121 +-- cli/argo_submit/index.html | 4123 +-- cli/argo_suspend/index.html | 4091 +-- cli/argo_template/index.html | 4074 +-- cli/argo_template_create/index.html | 4071 +-- cli/argo_template_delete/index.html | 4070 +-- cli/argo_template_get/index.html | 4070 +-- cli/argo_template_lint/index.html | 4071 +-- cli/argo_template_list/index.html | 4071 +-- cli/argo_terminate/index.html | 4119 +-- cli/argo_version/index.html | 4070 +-- cli/argo_wait/index.html | 4093 +-- cli/argo_watch/index.html | 4094 +-- client-libraries/index.html | 4048 +-- cluster-workflow-templates/index.html | 4215 +-- conditional-artifacts-parameters/index.html | 4039 +-- configure-archive-logs/index.html | 4103 +-- configure-artifact-repository/index.html | 4762 +-- container-set-template/index.html | 4091 +-- cost-optimisation/index.html | 4205 +-- cron-backfill/index.html | 4009 +-- cron-workflows/index.html | 4433 +-- data-sourcing-and-transformation/index.html | 4039 +-- debug-pause/index.html | 4044 +-- default-workflow-specs/index.html | 4034 +-- disaster-recovery/index.html | 3940 +-- doc-changes/index.html | 4023 +-- empty-dir/index.html | 3963 +-- enhanced-depends-logic/index.html | 4090 +-- environment-variables/index.html | 4457 +-- estimated-duration/index.html | 3946 +-- events/index.html | 4345 +-- executor_plugins/index.html | 4388 +-- executor_swagger/index.html | 25987 +--------------- faq/index.html | 4049 +-- fields/index.html | 20635 +----------- high-availability/index.html | 4015 +-- http-template/index.html | 4023 +-- ide-setup/index.html | 4070 +-- index.html | 
4274 +-- inline-templates/index.html | 3945 +-- installation/index.html | 4110 +-- intermediate-inputs/index.html | 4158 +-- key-only-artifacts/index.html | 3986 +-- kubectl/index.html | 3938 +-- lifecyclehook/index.html | 4078 +-- links/index.html | 3967 +-- managed-namespace/index.html | 3957 +-- manually-create-secrets/index.html | 4017 +-- memoization/index.html | 4071 +-- metrics/index.html | 4639 +-- node-field-selector/index.html | 4099 +-- offloading-large-workflows/index.html | 4052 +-- plugin-directory/index.html | 3987 +-- plugins/index.html | 3942 +-- progress/index.html | 4038 +-- proposals/artifact-gc-proposal/index.html | 4047 +-- .../cron-wf-improvement-proposal/index.html | 4033 +-- .../makefile-improvement-proposal/index.html | 4059 +-- public-api/index.html | 3936 +-- quick-start/index.html | 4162 +-- releases/index.html | 4180 +-- releasing/index.html | 4046 +-- resource-duration/index.html | 4102 +-- resource-template/index.html | 3936 +-- rest-api/index.html | 3995 +-- rest-examples/index.html | 4084 +-- retries/index.html | 4106 +-- roadmap/index.html | 3914 +-- running-at-massive-scale/index.html | 4060 +-- running-locally/index.html | 4303 +-- running-nix/index.html | 4071 +-- scaling/index.html | 4190 +-- security/index.html | 4250 +-- service-accounts/index.html | 4034 +-- sidecar-injection/index.html | 4073 +-- static-code-analysis/index.html | 3936 +-- stress-testing/index.html | 4023 +-- survey-data-privacy/index.html | 3931 +-- suspend-template/index.html | 3936 +-- swagger/index.html | 3957 +-- synchronization/index.html | 4162 +-- template-defaults/index.html | 4050 +-- tls/index.html | 4132 +-- tolerating-pod-deletion/index.html | 4007 +-- training/index.html | 4011 +-- upgrading/index.html | 4697 +-- use-cases/ci-cd/index.html | 4005 +-- use-cases/data-processing/index.html | 4018 +-- .../infrastructure-automation/index.html | 4004 +-- use-cases/machine-learning/index.html | 4030 +-- use-cases/other/index.html | 3984 +-- 
use-cases/stream-processing/index.html | 3933 +-- use-cases/webhdfs/index.html | 4046 +-- variables/index.html | 4814 +-- walk-through/argo-cli/index.html | 4004 +-- walk-through/artifacts/index.html | 4279 +-- walk-through/conditionals/index.html | 4007 +-- .../index.html | 3944 +-- .../index.html | 3967 +-- walk-through/daemon-containers/index.html | 4005 +-- walk-through/dag/index.html | 4042 +-- .../index.html | 3965 +-- walk-through/exit-handlers/index.html | 3985 +-- walk-through/hardwired-artifacts/index.html | 3974 +-- walk-through/hello-world/index.html | 3979 +-- walk-through/index.html | 3942 +-- walk-through/kubernetes-resources/index.html | 4003 +-- walk-through/loops/index.html | 4253 +-- walk-through/output-parameters/index.html | 4068 +-- walk-through/parameters/index.html | 4001 +-- walk-through/recursion/index.html | 3993 +-- .../index.html | 3964 +-- walk-through/scripts-and-results/index.html | 3986 +-- walk-through/secrets/index.html | 3968 +-- walk-through/sidecars/index.html | 3953 +-- walk-through/steps/index.html | 3982 +-- walk-through/suspending/index.html | 3971 +-- .../index.html | 3957 +-- walk-through/timeouts/index.html | 3962 +-- walk-through/volumes/index.html | 4115 +-- webhooks/index.html | 3951 +-- widgets/index.html | 3940 +-- windows/index.html | 4143 +-- work-avoidance/index.html | 3962 +-- workflow-archive/index.html | 4154 +-- workflow-concepts/index.html | 4285 +-- workflow-controller-configmap/index.html | 4042 +-- workflow-creator/index.html | 3950 +-- workflow-events/index.html | 3972 +-- workflow-executors/index.html | 4227 +-- workflow-inputs/index.html | 4108 +-- workflow-notifications/index.html | 3944 +-- workflow-of-workflows/index.html | 4091 +-- workflow-pod-security-context/index.html | 3953 +-- workflow-rbac/index.html | 3970 +-- workflow-restrictions/index.html | 4029 +-- workflow-submitting-workflow/index.html | 3958 +-- workflow-templates/index.html | 4511 +-- 221 files changed, 8330 insertions(+), 808696 
deletions(-) delete mode 100644 assets/javascripts/bundle.960e086b.min.js delete mode 100644 assets/javascripts/bundle.960e086b.min.js.map delete mode 100644 assets/javascripts/lunr/min/lunr.ar.min.js delete mode 100644 assets/javascripts/lunr/min/lunr.da.min.js delete mode 100644 assets/javascripts/lunr/min/lunr.de.min.js delete mode 100644 assets/javascripts/lunr/min/lunr.du.min.js delete mode 100644 assets/javascripts/lunr/min/lunr.es.min.js delete mode 100644 assets/javascripts/lunr/min/lunr.fi.min.js delete mode 100644 assets/javascripts/lunr/min/lunr.fr.min.js delete mode 100644 assets/javascripts/lunr/min/lunr.hi.min.js delete mode 100644 assets/javascripts/lunr/min/lunr.hu.min.js delete mode 100644 assets/javascripts/lunr/min/lunr.it.min.js delete mode 100644 assets/javascripts/lunr/min/lunr.ja.min.js delete mode 100644 assets/javascripts/lunr/min/lunr.jp.min.js delete mode 100644 assets/javascripts/lunr/min/lunr.multi.min.js delete mode 100644 assets/javascripts/lunr/min/lunr.nl.min.js delete mode 100644 assets/javascripts/lunr/min/lunr.no.min.js delete mode 100644 assets/javascripts/lunr/min/lunr.pt.min.js delete mode 100644 assets/javascripts/lunr/min/lunr.ro.min.js delete mode 100644 assets/javascripts/lunr/min/lunr.ru.min.js delete mode 100644 assets/javascripts/lunr/min/lunr.stemmer.support.min.js delete mode 100644 assets/javascripts/lunr/min/lunr.sv.min.js delete mode 100644 assets/javascripts/lunr/min/lunr.th.min.js delete mode 100644 assets/javascripts/lunr/min/lunr.tr.min.js delete mode 100644 assets/javascripts/lunr/min/lunr.vi.min.js delete mode 100644 assets/javascripts/lunr/min/lunr.zh.min.js delete mode 100644 assets/javascripts/lunr/tinyseg.js delete mode 100644 assets/javascripts/lunr/wordcut.js delete mode 100644 assets/javascripts/workers/search.22074ed6.min.js delete mode 100644 assets/javascripts/workers/search.22074ed6.min.js.map delete mode 100644 assets/stylesheets/palette.e6a45f82.min.css delete mode 100644 
assets/stylesheets/palette.e6a45f82.min.css.map diff --git a/404.html b/404.html index bf9ee698d790..fe59df6f0497 100644 --- a/404.html +++ b/404.html @@ -1,3809 +1,68 @@ - - - - - - - - - - - - - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - -
- -
-
- -
- - - - - - -
- - - - - +
- - - - - - - -
- - - -
- - - -
-
-
- - - - - - -
-
-
- - - +
+
+
+
- - - - Back to top - -
- - - -
-
-
+ +
- - - - - - \ No newline at end of file diff --git a/CONTRIBUTING/index.html b/CONTRIBUTING/index.html index dea365877dca..7668e8a9d1e8 100644 --- a/CONTRIBUTING/index.html +++ b/CONTRIBUTING/index.html @@ -1,4168 +1,68 @@ - - - - - - - - - - - - - Contributing - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Contributing - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - -
- - - - Skip to content - - -
-
- -
- - - - - - -
- - - - - +
- - - - - - - -
- - - -
- - - -
-
-
- - - - - - -
-
-
- - - -
- -
- - +
+
+
+
- - - - - - - - -

Contributing

-

How To Provide Feedback

-

Please raise an issue in Github.

-

Code of Conduct

-

See CNCF Code of Conduct.

-

Community Meetings (monthly)

-

A monthly opportunity for users and maintainers of Workflows and Events to share their current work and -hear about what’s coming on the roadmap. Please join us! For Community Meeting information, minutes and recordings -please see here.

-

Contributor Meetings (twice monthly)

-

A weekly opportunity for committers and maintainers of Workflows and Events to discuss their current work and -talk about what’s next. Feel free to join us! For Contributor Meeting information, minutes and recordings -please see here.

-

How To Contribute

-

We're always looking for contributors.

-
    -
  • Documentation - something missing or unclear? Please submit a pull request!
  • -
  • Code contribution - investigate - a good first issue - , or anything not assigned.
  • -
  • You can work on an issue without being assigned.
  • -
  • Join the #argo-contributors channel on our Slack.
  • -
-

Running Locally

-

To run Argo Workflows locally for development: running locally.

-

Committing

-

See the Committing Guidelines.

-

Dependencies

-

Dependencies increase the risk of security issues and have on-going maintenance costs.

-

The dependency must pass these test:

-
    -
  • A strong use case.
  • -
  • It has an acceptable license (e.g. MIT).
  • -
  • It is actively maintained.
  • -
  • It has no security issues.
  • -
-

Example, should we add fasttemplate -, view the Snyk report:

- - - - - - - - - - - - - - - - - - - - - - - - - -
TestOutcome
A strong use case.❌ Fail. We can use text/template.
It has an acceptable license (e.g. MIT)✅ Pass. MIT license.
It is actively maintained.❌ Fail. Project is inactive.
It has no security issues.✅ Pass. No known security issues.
-

No, we should not add that dependency.

-

Test Policy

-

Changes without either unit or e2e tests are unlikely to be accepted. -See the pull request template.

-

Contributor Workshop

-

Please check out the following resources if you are interested in contributing:

- - - - - -

Comments

- - +

Contributing - Argo Workflows - The workflow engine for Kubernetes

+

This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

+

You should be redirected there automatically. Please click the link above if you are not redirected.

- - - - Back to top - -
- - - -
-
-
+
+ - - - - - - \ No newline at end of file diff --git a/access-token/index.html b/access-token/index.html index 63bde8bdd7ab..23882ad316ab 100644 --- a/access-token/index.html +++ b/access-token/index.html @@ -1,4161 +1,68 @@ - - - - - - - - - - - - - Access Token - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Access Token - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - -
- - - - Skip to content - - -
-
- -
- - - - - - -
- - - - - +
- - - - - - - -
- - - -
- - - -
-
-
- - - - - - -
-
-
- - - -
- -
- - +
+
+
+
- - - - - - - - -

Access Token

-

Overview

-

If you want to automate tasks with the Argo Server API or CLI, you will need an access token.

-

Prerequisites

-

Firstly, create a role with minimal permissions. This example role for jenkins only permission to update and list workflows:

-
kubectl create role jenkins --verb=list,update --resource=workflows.argoproj.io
-
-

Create a service account for your service:

-
kubectl create sa jenkins
-
-

Tip for Tokens Creation

-

Create a unique service account for each client:

-
    -
  • (a) you'll be able to correctly secure your workflows
  • -
  • (b) revoke the token without impacting other clients.
  • -
-

Bind the service account to the role (in this case in the argo namespace):

-
kubectl create rolebinding jenkins --role=jenkins --serviceaccount=argo:jenkins
-
-

Token Creation

-

You now need to create a secret to hold your token:

-
    kubectl apply -f - <<EOF
-apiVersion: v1
-kind: Secret
-metadata:
-  name: jenkins.service-account-token
-  annotations:
-    kubernetes.io/service-account.name: jenkins
-type: kubernetes.io/service-account-token
-EOF
-
-

Wait a few seconds:

-
ARGO_TOKEN="Bearer $(kubectl get secret jenkins.service-account-token -o=jsonpath='{.data.token}' | base64 --decode)"
-echo $ARGO_TOKEN
-Bearer ZXlKaGJHY2lPaUpTVXpJMU5pSXNJbXRwWkNJNkltS...
-
-

Token Usage & Test

-

To use that token with the CLI you need to set ARGO_SERVER (see argo --help).

-

Use that token in your API requests, e.g. to list workflows:

-
curl https://localhost:2746/api/v1/workflows/argo -H "Authorization: $ARGO_TOKEN"
-# 200 OK
-
-

You should check you cannot do things you're not allowed!

-
curl https://localhost:2746/api/v1/workflow-templates/argo -H "Authorization: $ARGO_TOKEN"
-# 403 error
-
-

Token Usage - Docker

-

Set additional params to initialize Argo settings

-
ARGO_SERVER="${{HOST}}:443"
-KUBECONFIG=/dev/null
-ARGO_NAMESPACE=sandbox
-
-

Start container with settings above

-

Example for listing templates in a namespace:

-
docker run --rm -it \
-  -e ARGO_SERVER=$ARGO_SERVER \
-  -e ARGO_TOKEN=$ARGO_TOKEN \
-  -e ARGO_HTTP=false \
-  -e ARGO_HTTP1=true \
-  -e KUBECONFIG=/dev/null \
-  -e ARGO_NAMESPACE=$ARGO_NAMESPACE  \
-  argoproj/argocli:latest template list -v -e -k
-
-

Token Revocation

-

Token compromised?

-
kubectl delete secret $SECRET
-
-

A new one will be created.

- - - - -

Comments

- - +

Access Token - Argo Workflows - The workflow engine for Kubernetes

+

This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

+

You should be redirected there automatically. Please click the link above if you are not redirected.

- - - - Back to top - -
- - - -
-
-
+
+ - - - - - - \ No newline at end of file diff --git a/architecture/index.html b/architecture/index.html index 18fbf4e8f89e..31ed5358a5d5 100644 --- a/architecture/index.html +++ b/architecture/index.html @@ -1,4023 +1,68 @@ - - - - - - - - - - - - - Architecture - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Architecture - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - -
- - - - Skip to content - - -
-
- -
- - - - - - -
- - - - - +
- - - - - - - -
- - - -
- - - -
-
-
- - - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - +
+
+
+
- - - - - - - - -

Architecture

-

Diagram

-

The following diagram shows the components of the Argo Workflows architecture. There are two Deployments: Workflow Controller and Argo Server. The former does all of the reconciling, and the latter serves the API. Note that the Controller can be used stand alone.

-

The reconciliation code for the WorkflowController can be found in workflow/controller/controller.go. The Argo Server opens up an HTTP(S) listener at server/apiserver/argoserver.go.

-

diagram

-
-

Argo Workflow Overview

-

The diagram below provides a little more detail as far as namespaces. The Workflow Controller and Argo Server both run in the argo namespace. Assuming Argo Workflows was installed as a Cluster Install or as a Managed Namespace Install (described here), the Workflows and the Pods generated from them run in a separate namespace.

-

The internals of a Pod are also shown. Each Step and each DAG Task cause a Pod to be generated, and each of these is composed of 3 containers:

-
    -
  • main container runs the Image that the user indicated, where the argoexec utility is volume mounted and serves as the main command which calls the configured Command as a sub-process
  • -
  • init container is an InitContainer, fetching artifacts and parameters and making them available to the main container
  • -
  • wait container performs tasks that are needed for clean up, including saving off parameters and artifacts
  • -
-

Look in cmd/argoexec for this code.

-

diagram

-
-

Workflow controller architecture

-

The following diagram shows the process for reconciliation, whereby a set of worker goroutines process the Workflows which have been added to a Workflow queue based on adds and updates to Workflows and Workflow Pods. Note that in addition to the Informers shown, there are Informers for the other CRDs that Argo Workflows uses as well. You can find this code in workflow/controller/controller.go. Note that the controller only ever processes a single Workflow at a time.

-

diagram

-
-

Various configurations for Argo UI and Argo Server

-

The top diagram below shows what happens if you run "make start UI=true" locally (recommended if you need the UI during local development). This runs a React application (Webpack HTTP server) locally which serves the index.html and typescript files from port 8080. From the typescript code there are calls made to the back end API (Argo Server) at port 2746. The Webpack HTTP server is configured for hot reload, meaning the UI will update automatically based on local code changes.

-

The second diagram is an alternative approach for rare occasions that the React files are broken and you're doing local development. In this case, everything is served from the Argo Server at port 2746.

-

The third diagram shows how things are configured for a Kubernetes environment. It is similar to the second diagram in that the Argo Server hosts everything for the UI.

-

diagram

- - - - -

Comments

- - +

Architecture - Argo Workflows - The workflow engine for Kubernetes

+

This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

+

You should be redirected there automatically. Please click the link above if you are not redirected.

- - - - Back to top - -
- - - -
-
-
+
+ - - - - - - \ No newline at end of file diff --git a/argo-server-auth-mode/index.html b/argo-server-auth-mode/index.html index b99d77aee97b..52fb34ee594e 100644 --- a/argo-server-auth-mode/index.html +++ b/argo-server-auth-mode/index.html @@ -1,3922 +1,68 @@ - - - - - - - - - - - - - Argo Server Auth Mode - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Argo Server Auth Mode - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - -
- - - - Skip to content - - -
-
- -
- - - - - - -
- - - - - +
- - - - - - - -
- - - -
- - - -
-
-
- - - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - +
+
+
+
- - - - - - - - -

Argo Server Auth Mode

-

You can choose which kube config the Argo Server uses:

-
    -
  • server - in hosted mode, use the kube config of service account, in local mode, use your local kube config.
  • -
  • client - requires clients to provide their Kubernetes bearer token and use that.
  • -
  • sso - since v2.9, use single sign-on, this will use the same service account as per "server" for RBAC. We expect to change this in the future so that the OAuth claims are mapped to service accounts.
  • -
-

The server used to start with auth mode of "server" by default, but since v3.0 it defaults to the "client".

-

To change the server auth mode specify the list as multiple auth-mode flags:

-
argo server --auth-mode=sso --auth-mode=...
-
- - - - -

Comments

- - +

Argo Server Auth Mode - Argo Workflows - The workflow engine for Kubernetes

+

This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

+

You should be redirected there automatically. Please click the link above if you are not redirected.

- - - - Back to top - -
- - - -
-
-
+
+ - - - - - - \ No newline at end of file diff --git a/argo-server-sso-argocd/index.html b/argo-server-sso-argocd/index.html index 7350cb8f731a..cbb58c7ae498 100644 --- a/argo-server-sso-argocd/index.html +++ b/argo-server-sso-argocd/index.html @@ -1,4107 +1,68 @@ - - - - - - - - - - - - - Use Argo CD Dex for authentication - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Use Argo CD Dex for authentication - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - -
- - - - Skip to content - - -
-
- -
- - - - - - -
- - - - - +
- - - - - - - -
- - - -
- - - -
-
-
- - - - - - -
-
-
- - - - - - +
+
+
+
- - - - - - - - -

Use Argo CD Dex for authentication

-

It is possible to have the Argo Workflows Server use the Argo CD Dex instance for authentication, for instance if you use Okta with SAML which cannot integrate with Argo Workflows directly. In order to make this happen, you will need the following:

-
    -
  • You must be using at least Dex v2.35.0, because that's when staticClients[].secretEnv was added. That means Argo CD 1.7.12 and above.
  • -
  • A secret containing two keys, client-id and client-secret to be used by both Dex and Argo Workflows Server. client-id is argo-workflows-sso in this example, client-secret can be any random string. If Argo CD and Argo Workflows are installed in different namespaces the secret must be present in both of them. Example:
  • -
-
apiVersion: v1
-kind: Secret
-metadata:
-  name: argo-workflows-sso
-data:
-  # client-id is 'argo-workflows-sso'
-  client-id: YXJnby13b3JrZmxvd3Mtc3Nv
-  # client-secret is 'MY-SECRET-STRING-CAN-BE-UUID'
-  client-secret: TVktU0VDUkVULVNUUklORy1DQU4tQkUtVVVJRA==
-
-
    -
  • --auth-mode=sso server argument added
  • -
  • A Dex staticClients configured for argo-workflows-sso
  • -
  • The sso configuration filled out in Argo Workflows Server to match
  • -
-

Example manifests for authenticating against Argo CD's Dex (Kustomize)

-

In Argo CD, add an environment variable to Dex deployment and configuration:

-
---
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: argocd-dex-server
-spec:
-  template:
-    spec:
-      containers:
-        - name: dex
-          env:
-            - name: ARGO_WORKFLOWS_SSO_CLIENT_SECRET
-              valueFrom:
-                secretKeyRef:
-                  name: argo-workflows-sso
-                  key: client-secret
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: argocd-cm
-data:
-  # Kustomize sees the value of dex.config as a single string instead of yaml. It will not merge
-  # Dex settings, but instead it will replace the entire configuration with the settings below,
-  # so add these to the existing config instead of setting them in a separate file
-  dex.config: |
-    # Setting staticClients allows Argo Workflows to use Argo CD's Dex installation for authentication
-    staticClients:
-      - id: argo-workflows-sso
-        name: Argo Workflow
-        redirectURIs:
-          - https://argo-workflows.mydomain.com/oauth2/callback
-        secretEnv: ARGO_WORKFLOWS_SSO_CLIENT_SECRET
-
-

Note that the id field of staticClients must match the client-id.

-

In Argo Workflows add --auth-mode=sso argument to argo-server deployment.

-
---
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: argo-server
-spec:
-  template:
-    spec:
-      containers:
-        - name: argo-server
-          args:
-            - server
-            - --auth-mode=sso
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: workflow-controller-configmap
-data:
-  # SSO Configuration for the Argo server.
-  # You must also start argo server with `--auth-mode sso`.
-  # https://argoproj.github.io/argo-workflows/argo-server-auth-mode/
-  sso: |
-    # This is the root URL of the OIDC provider (required).
-    issuer: https://argo-cd.mydomain.com/api/dex
-    # This is name of the secret and the key in it that contain OIDC client
-    # ID issued to the application by the provider (required).
-    clientId:
-      name: argo-workflows-sso
-      key: client-id
-    # This is name of the secret and the key in it that contain OIDC client
-    # secret issued to the application by the provider (required).
-    clientSecret:
-      name: argo-workflows-sso
-      key: client-secret
-    # This is the redirect URL supplied to the provider (required). It must
-    # be in the form <argo-server-root-url>/oauth2/callback. It must be
-    # browser-accessible.
-    redirectUrl: https://argo-workflows.mydomain.com/oauth2/callback
-
-

Example Helm chart configuration for authenticating against Argo CD's Dex

-

argo-cd/values.yaml:

-
     dex:
-       image:
-         tag: v2.35.0
-       env:
-         - name: ARGO_WORKFLOWS_SSO_CLIENT_SECRET
-           valueFrom:
-             secretKeyRef:
-               name: argo-workflows-sso
-               key: client-secret
-     server:
-       config:
-         dex.config: |
-           staticClients:
-           - id: argo-workflows-sso
-             name: Argo Workflow
-             redirectURIs:
-               - https://argo-workflows.mydomain.com/oauth2/callback
-             secretEnv: ARGO_WORKFLOWS_SSO_CLIENT_SECRET
-
-

argo-workflows/values.yaml:

-
     server:
-       extraArgs:
-         - --auth-mode=sso
-       sso:
-         issuer: https://argo-cd.mydomain.com/api/dex
-         # sessionExpiry defines how long your login is valid for in hours. (optional, default: 10h)
-         sessionExpiry: 240h
-         clientId:
-           name: argo-workflows-sso
-           key: client-id
-         clientSecret:
-           name: argo-workflows-sso
-           key: client-secret
-         redirectUrl: https://argo-workflows.mydomain.com/oauth2/callback
-
- - - - -

Comments

- - +

Use Argo CD Dex for authentication - Argo Workflows - The workflow engine for Kubernetes

+

This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

+

You should be redirected there automatically. Please click the link above if you are not redirected.

- - - - Back to top - -
- - - -
-
-
+
+ - - - - - - \ No newline at end of file diff --git a/argo-server-sso/index.html b/argo-server-sso/index.html index 94c8b2a55665..3e62d2057ae4 100644 --- a/argo-server-sso/index.html +++ b/argo-server-sso/index.html @@ -1,4256 +1,68 @@ - - - - - - - - - - - - - Argo Server SSO - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Argo Server SSO - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - -
- - - - Skip to content - - -
-
- -
- - - - - - -
- - - - - +
- - - - - - - -
- - - -
- - - -
-
-
- - - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - +
+
+
+
- - - - - - - - -

Argo Server SSO

-
-

v2.9 and after

-
-

It is possible to use Dex for authentication. This document describes how to set up Argo Workflows and Argo CD so that Argo Workflows uses Argo CD's Dex server for authentication.

-

To start Argo Server with SSO

-

Firstly, configure the settings workflow-controller-configmap.yaml with the correct OAuth 2 values. If working towards an OIDC configuration the Argo CD project has guides on its similar (though different) process for setting up OIDC providers. It also includes examples for specific providers. The main difference is that the Argo CD docs mention that their callback address endpoint is /auth/callback. For Argo Workflows, the default format is /oauth2/callback as shown in this comment in the default values.yaml file in the helm chart.

-

Next, create the Kubernetes secrets for holding the OAuth2 client-id and client-secret. You may refer to the kubernetes documentation on Managing secrets. For example by using kubectl with literals:

-
kubectl create secret -n argo generic client-id-secret \
-  --from-literal=client-id-key=foo
-
-kubectl create secret -n argo generic client-secret-secret \
-  --from-literal=client-secret-key=bar
-
-

Then, start the Argo Server using the SSO auth mode:

-
argo server --auth-mode sso --auth-mode ...
-
-

Token Revocation

-
-

v2.12 and after

-
-

As of v2.12 we issue a JWE token for users rather than give them the ID token from your OAuth2 provider. This token is opaque and has a longer expiry time (10h by default).

-

The token encryption key is automatically generated by the Argo Server and stored in a Kubernetes secret name sso.

-

You can revoke all tokens by deleting the encryption key and restarting the Argo Server (so it generates a new key).

-
kubectl delete secret sso
-
-
-

Warning

-

The old key will be in the memory the any running Argo Server, and they will therefore accept and user with token encrypted using the old key. Every Argo Server MUST be restarted.

-
-

All users will need to log in again. Sorry.

-

SSO RBAC

-
-

v2.12 and after

-
-

You can optionally add RBAC to SSO. This allows you to give different users different access levels. Except for client auth mode, all users of the Argo Server must ultimately use a service account. So we allow you to define rules that map a user (maybe using their OIDC groups) to a service account in the same namespace as argo server by annotating the service account.

-

To allow service accounts to manage resources in other namespaces create a role and role binding in the target namespace.

-

RBAC config is installation-level, so any changes will need to be made by the team that installed Argo. Many complex rules will be burdensome on that team.

-

Firstly, enable the rbac: setting in workflow-controller-configmap.yaml. You likely want to configure RBAC using groups, so add scopes: to the SSO settings:

-
sso:
-  # ...
-  scopes:
-   - groups
-  rbac:
-    enabled: true
-
-
-

Note

-

Not all OIDC providers support the groups scope. Please speak to your provider about their options.

-
-

To configure a service account to be used, annotate it:

-
apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: admin-user
-  annotations:
-    # The rule is an expression used to determine if this service account
-    # should be used.
-    # * `groups` - an array of the OIDC groups
-    # * `iss` - the issuer ("argo-server")
-    # * `sub` - the subject (typically the username)
-    # Must evaluate to a boolean.
-    # If you want an account to be the default to use, this rule can be "true".
-    # Details of the expression language are available in
-    # https://github.com/antonmedv/expr/blob/master/docs/Language-Definition.md.
-    workflows.argoproj.io/rbac-rule: "'admin' in groups"
-    # The precedence is used to determine which service account to use whe
-    # Precedence is an integer. It may be negative. If omitted, it defaults to "0".
-    # Numerically higher values have higher precedence (not lower, which maybe
-    # counter-intuitive to you).
-    # If two rules match and have the same precedence, then which one used will
-    # be arbitrary.
-    workflows.argoproj.io/rbac-rule-precedence: "1"
-
-

If no rule matches, we deny the user access.

-

Tip: You'll probably want to configure a default account to use if no other rule matches, e.g. a read-only account, you can do this as follows:

-
metadata:
-  name: read-only
-  annotations:
-    workflows.argoproj.io/rbac-rule: "true"
-    workflows.argoproj.io/rbac-rule-precedence: "0"
-
-

The precedence must be the lowest of all your service accounts.

-

As of Kubernetes v1.24, secrets for a service account token are no longer automatically created. -Therefore, service account secrets for SSO RBAC must be created manually. -See Manually create secrets for detailed instructions.

-

SSO RBAC Namespace Delegation

-
-

v3.3 and after

-
-

You can optionally configure RBAC SSO per namespace. -Typically, on organization has a Kubernetes cluster and a central team (the owner of the cluster) manages the cluster. Along with this, there are multiple namespaces which are owned by individual teams. This feature would help namespace owners to define RBAC for their own namespace.

-

The feature is currently in beta. -To enable the feature, set env variable SSO_DELEGATE_RBAC_TO_NAMESPACE=true in your argo-server deployment.

- -

Configure a default account in the installation namespace that allows access to all users of your organization. This service account allows a user to login to the cluster. You could optionally add a workflow read-only role and role-binding.

-
apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: user-default-login
-  annotations:
-    workflows.argoproj.io/rbac-rule: "true"
-    workflows.argoproj.io/rbac-rule-precedence: "0"
-
-
-

Note

-

All users MUST map to a cluster service account (such as the one above) before a namespace service account can apply.

-
-

Now, for the namespace that you own, configure a service account that allows members of your team to perform operations in your namespace. -Make sure that the precedence of the namespace service account is higher than the precedence of the login service account. Create an appropriate role for this service account and bind it with a role-binding.

-
apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: my-namespace-read-write-user
-  namespace: my-namespace
-  annotations:
-    workflows.argoproj.io/rbac-rule: "'my-team' in groups"
-    workflows.argoproj.io/rbac-rule-precedence: "1"
-
-

With this configuration, when a user is logged in via SSO, makes a request in my-namespace, and the rbac-rule matches, this service account allows the user to perform that operation. If no service account matches in the namespace, the first service account (user-default-login) and its associated role will be used to perform the operation.

-

SSO Login Time

-
-

v2.12 and after

-
-

By default, your SSO session will expire after 10 hours. You can change this by adding a sessionExpiry to your workflow-controller-configmap.yaml under the SSO heading.

-
sso:
-  # Expiry defines how long your login is valid for in hours. (optional)
-  sessionExpiry: 240h
-
-

Custom claims

-
-

v3.1.4 and after

-
-

If your OIDC provider provides groups information with a claim name other than groups, you could configure config-map to specify custom claim name for groups. Argo now arbitrary custom claims and any claim can be used for expr eval. However, since group information is displayed in UI, it still needs to be an array of strings with group names as elements.

-

The customClaim in this case will be mapped to groups key and we can use the same key groups for evaluating our expressions

-
sso:
-  # Specify custom claim name for OIDC groups.
-  customGroupClaimName: argo_groups
-
-

If your OIDC provider provides groups information only using the user-info endpoint (e.g. Okta), you could configure userInfoPath to specify the user info endpoint that contains the groups claim.

-
sso:
-  userInfoPath: /oauth2/v1/userinfo
-
-

Example Expression

-
# assuming customClaimGroupName: argo_groups
-workflows.argoproj.io/rbac-rule: "'argo_admins' in groups"
-
-

Filtering groups

-
-

v3.5 and above

-
-

You can configure filterGroupsRegex to filter the groups returned by the OIDC provider. Some use-cases for this include:

-
    -
  • You have multiple applications using the same OIDC provider, and you only want to use groups that are relevant to Argo Workflows.
  • -
  • You have many groups and exceed the 4KB cookie size limit (cookies are used to store authentication tokens). If this occurs, login will fail.
  • -
-
sso:
-    # Specify a list of regular expressions to filter the groups returned by the OIDC provider.
-    # A logical "OR" is used between each regex in the list
-    filterGroupsRegex:
-    - ".*argo-wf.*"
-    - ".*argo-workflow.*"
-
- - - - -

Comments

- - +

Argo Server SSO - Argo Workflows - The workflow engine for Kubernetes

+

This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

+

You should be redirected there automatically. Please click the link above if you are not redirected.

- - - - Back to top - -
- - - -
-
-
+
+ - - - - - - \ No newline at end of file diff --git a/argo-server/index.html b/argo-server/index.html index f1170a731568..0607dc5a1a75 100644 --- a/argo-server/index.html +++ b/argo-server/index.html @@ -1,4331 +1,68 @@ - - - - - - - - - - - - - Argo Server - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Argo Server - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - -
- - - - Skip to content - - -
-
- -
- - - - - - -
- - - - - +
- - - - - - - -
- - - -
- - - -
-
-
- - - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - +
+
+
+
- - - - - - - - -

Argo Server

-
-

v2.5 and after

-
-
-

HTTP vs HTTPS

-

Since v3.0 the Argo Server listens for HTTPS requests, rather than HTTP.

-
-

The Argo Server is a server that exposes an API and UI for workflows. You'll need to use this if you want to offload large workflows or the workflow archive.

-

You can run this in either "hosted" or "local" mode.

-

It replaces the Argo UI.

-

Hosted Mode

-

Use this mode if:

-
    -
  • You want a drop-in replacement for the Argo UI.
  • -
  • If you need to prevent users from directly accessing the database.
  • -
-

Hosted mode is provided as part of the standard manifests, specifically in argo-server-deployment.yaml .

-

Local Mode

-

Use this mode if:

-
    -
  • You want something that does not require complex set-up.
  • -
  • You do not need to run a database.
  • -
-

To run locally:

-
argo server
-
-

This will start a server on port 2746 which you can view.

-

Options

-

Auth Mode

-

See auth.

-

Managed Namespace

-

See managed namespace.

-

Base HREF

-

If the server is running behind reverse proxy with a sub-path different from / (for example, -/argo), you can set an alternative sub-path with the --basehref flag or the BASE_HREF -environment variable.

-

You probably now should read how to set-up an ingress

-

Transport Layer Security

-

See TLS.

-

SSO

-

See SSO. See here about sharing Argo CD's Dex with Argo Workflows.

-

Access the Argo Workflows UI

-

By default, the Argo UI service is not exposed with an external IP. To access the UI, use one of the -following:

-

kubectl port-forward

-
kubectl -n argo port-forward svc/argo-server 2746:2746
-
-

Then visit: https://localhost:2746

-

Expose a LoadBalancer

-

Update the service to be of type LoadBalancer.

-
kubectl patch svc argo-server -n argo -p '{"spec": {"type": "LoadBalancer"}}'
-
-

Then wait for the external IP to be made available:

-
kubectl get svc argo-server -n argo
-
-
NAME          TYPE           CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE
-argo-server   LoadBalancer   10.43.43.130   172.18.0.2    2746:30008/TCP   18h
-
-

Ingress

-

You can get ingress working as follows:

-

Add BASE_HREF as environment variable to deployment/argo-server. Do not forget to add a trailing '/' character.

-
---
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: argo-server
-spec:
-  selector:
-    matchLabels:
-      app: argo-server
-  template:
-    metadata:
-      labels:
-        app: argo-server
-    spec:
-      containers:
-      - args:
-        - server
-        env:
-          - name: BASE_HREF
-            value: /argo/
-        image: argoproj/argocli:latest
-        name: argo-server
-...
-
-

Create a ingress, with the annotation ingress.kubernetes.io/rewrite-target: /:

-
-

If TLS is enabled (default in v3.0 and after), the ingress controller must be told -that the backend uses HTTPS. The method depends on the ingress controller, e.g. -Traefik expects an ingress.kubernetes.io/protocol annotation, while ingress-nginx -uses nginx.ingress.kubernetes.io/backend-protocol

-
-
apiVersion: networking.k8s.io/v1beta1
-kind: Ingress
-metadata:
-  name: argo-server
-  annotations:
-    ingress.kubernetes.io/rewrite-target: /$2
-    ingress.kubernetes.io/protocol: https # Traefik
-    nginx.ingress.kubernetes.io/backend-protocol: https # ingress-nginx
-spec:
-  rules:
-    - http:
-        paths:
-          - backend:
-              serviceName: argo-server
-              servicePort: 2746
-            path: /argo(/|$)(.*)
-
-

Learn more

-

Security

-

Users should consider the following in their set-up of the Argo Server:

-

API Authentication Rate Limiting

-

Argo Server does not perform authentication directly. It delegates this to either the Kubernetes API Server (when --auth-mode=client) and the OAuth provider (when --auth-mode=sso). In each case, it is recommended that the delegate implements any authentication rate limiting you need.

-

IP Address Logging

-

Argo Server does not log the IP addresses of API requests. We recommend you put the Argo Server behind a load balancer, and that load balancer is configured to log the IP addresses of requests that return authentication or authorization errors.

-

Rate Limiting

-
-

v3.4 and after

-
-

Argo Server by default rate limits to 1000 per IP per minute, you can configure it through --api-rate-limit. You can access additional information through the following headers.

-
    -
  • X-Rate-Limit-Limit - the rate limit ceiling that is applicable for the current request.
  • -
  • X-Rate-Limit-Remaining - the number of requests left for the current rate-limit window.
  • -
  • X-Rate-Limit-Reset - the time at which the rate limit resets, specified in UTC time.
  • -
  • Retry-After - indicate when a client should retry requests (when the rate limit expires), in UTC time.
  • -
- - - - -

Comments

- - +

Argo Server - Argo Workflows - The workflow engine for Kubernetes

+

This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

+

You should be redirected there automatically. Please click the link above if you are not redirected.

- - - - Back to top - -
- - - -
-
-
+
+ - - - - - - \ No newline at end of file diff --git a/artifact-repository-ref/index.html b/artifact-repository-ref/index.html index 4031d911e2a6..c796bc427907 100644 --- a/artifact-repository-ref/index.html +++ b/artifact-repository-ref/index.html @@ -1,3950 +1,68 @@ - - - - - - - - - - - - - Artifact Repository Ref - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Artifact Repository Ref - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - -
- - - - Skip to content - - -
-
- -
- - - - - - -
- - - - - +
- - - - - - - -
- - - -
- - - -
-
-
- - - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - +
+
+
+
- - - - - - - - -

Artifact Repository Ref

-
-

v2.9 and after

-
-

You can reduce duplication in your templates by configuring repositories that can be accessed by any workflow. This can also remove sensitive information from your templates.

-

Create a suitable config map in either (a) your workflows namespace or (b) in the managed namespace:

-
apiVersion: v1
-kind: ConfigMap
-metadata:
-  # If you want to use this config map by default, name it "artifact-repositories". Otherwise, you can provide a reference to a
-  # different config map in `artifactRepositoryRef.configMap`.
-  name: my-artifact-repository
-  annotations:
-    # v3.0 and after - if you want to use a specific key, put that key into this annotation.
-    workflows.argoproj.io/default-artifact-repository: default-v1-s3-artifact-repository
-data:
-  default-v1-s3-artifact-repository: |
-    s3:
-      bucket: my-bucket
-      endpoint: minio:9000
-      insecure: true
-      accessKeySecret:
-        name: my-minio-cred
-        key: accesskey
-      secretKeySecret:
-        name: my-minio-cred
-        key: secretkey
-  v2-s3-artifact-repository: |
-    s3:
-      ...
-
-

You can override the artifact repository for a workflow as follows:

-
spec:
-  artifactRepositoryRef:
-    configMap: my-artifact-repository # default is "artifact-repositories"
-    key: v2-s3-artifact-repository # default can be set by the `workflows.argoproj.io/default-artifact-repository` annotation in config map.
-
-

This feature gives maximum benefit when used with key-only artifacts.

-

Reference.

- - - - -

Comments

- - +

Artifact Repository Ref - Argo Workflows - The workflow engine for Kubernetes

+

This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

+

You should be redirected there automatically. Please click the link above if you are not redirected.

- - - - Back to top - -
- - - -
-
-
+
+ - - - - - - \ No newline at end of file diff --git a/artifact-visualization/index.html b/artifact-visualization/index.html index ac6b7c807762..dec0bb980084 100644 --- a/artifact-visualization/index.html +++ b/artifact-visualization/index.html @@ -1,4131 +1,68 @@ - - - - - - - - - - - - - Artifact Visualization - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Artifact Visualization - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - -
- - - - Skip to content - - -
-
- -
- - - - - - -
- - - - - +
- - - - - - - -
- - - -
- - - -
-
-
- - - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - +
+
+
+
- - - - - - - - -

Artifact Visualization

-
-

since v3.4

-
-

Artifacts can be viewed in the UI.

-

Use cases:

-
    -
  • Comparing ML pipeline runs from generated charts.
  • -
  • Visualizing end results of ML pipeline runs.
  • -
  • Debugging workflows where visual artifacts are the most helpful.
  • -
-

Demo

-
    -
  • Artifacts appear as elements in the workflow DAG that you can click on.
  • -
  • When you click on the artifact, a panel appears.
  • -
  • The first time this appears explanatory text is shown to help you understand if you might need to change your - workflows to use this feature.
  • -
  • Known file types such as images, text or HTML are shown in an inline-frame (iframe).
  • -
  • Artifacts are sandboxed using a Content-Security-Policy that prevents JavaScript execution.
  • -
  • JSON is shown with syntax highlighting.
  • -
-

To start, take a look at the example.

-

Graph Report -Test Report

-

Artifact Types

-

An artifact maybe a .tgz, file or directory.

-

.tgz

-

Viewing of .tgz is not supported in the UI. By default artifacts are compressed as a .tgz. Only artifacts that were -not compressed can be viewed.

-

To prevent compression, set archive to none to prevent compression:

-
- name: artifact
-  # ...
-  archive:
-    none: { }
-
-

File

-

Files maybe shown in the UI. To determine if a file can be shown, the UI checks if the artifact's file extension is -supported. The extension is found in the artifact's key.

-

To view a file, add the extension to the key:

-
- name: single-file
-  s3:
-    key: visualization.png
-
-

Directory

-

Directories are shown in the UI. The UI considers any key with a trailing-slash to be a directory.

-

To view a directory, add a trailing-slash:

-
- name: reports
-  s3:
-    key: reports/
-
-

If the directory contains index.html, then that will be shown, otherwise a directory listing is displayed.

-

⚠️ HTML files may contain CSS and images served from the same origin. Scripts are not allowed. Nothing may be remotely -loaded.

-

Security

-

Content Security Policy

-

We assume that artifacts are not trusted, so by default, artifacts are served with a Content-Security-Policy that -disables JavaScript and remote files.

-

This is similar to what happens when you include third-party scripts, such as analytic tracking, in your website. -However, those tracking codes are normally served from a different domain to your main website. Artifacts are served -from the same origin, so normal browser controls are not secure enough.

-

Sub-Path Access

-

Previously, users could access the artifacts of any workflows they could access. To allow HTML files to link to other files -within their tree, you can now access any sub-paths of the artifact's key.

-

Example:

-

The artifact produces a folder in an S3 bucket named my-bucket, with a key report/. You can also access anything -matching report/*.

- - - - -

Comments

- - +

Artifact Visualization - Argo Workflows - The workflow engine for Kubernetes

+

This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

+

You should be redirected there automatically. Please click the link above if you are not redirected.

- - - - Back to top - -
- - - -
-
-
+
+ - - - - - - \ No newline at end of file diff --git a/assets/javascripts/bundle.960e086b.min.js b/assets/javascripts/bundle.960e086b.min.js deleted file mode 100644 index dcec588e4a85..000000000000 --- a/assets/javascripts/bundle.960e086b.min.js +++ /dev/null @@ -1,29 +0,0 @@ -(()=>{var Ni=Object.create;var Tt=Object.defineProperty;var qi=Object.getOwnPropertyDescriptor;var Qi=Object.getOwnPropertyNames,_t=Object.getOwnPropertySymbols,Ki=Object.getPrototypeOf,hr=Object.prototype.hasOwnProperty,Jr=Object.prototype.propertyIsEnumerable;var Xr=(e,t,r)=>t in e?Tt(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,F=(e,t)=>{for(var r in t||(t={}))hr.call(t,r)&&Xr(e,r,t[r]);if(_t)for(var r of _t(t))Jr.call(t,r)&&Xr(e,r,t[r]);return e};var Yi=e=>Tt(e,"__esModule",{value:!0});var Zr=(e,t)=>{var r={};for(var n in e)hr.call(e,n)&&t.indexOf(n)<0&&(r[n]=e[n]);if(e!=null&&_t)for(var n of _t(e))t.indexOf(n)<0&&Jr.call(e,n)&&(r[n]=e[n]);return r};var ht=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Bi=(e,t,r,n)=>{if(t&&typeof t=="object"||typeof t=="function")for(let o of Qi(t))!hr.call(e,o)&&(r||o!=="default")&&Tt(e,o,{get:()=>t[o],enumerable:!(n=qi(t,o))||n.enumerable});return e},ze=(e,t)=>Bi(Yi(Tt(e!=null?Ni(Ki(e)):{},"default",!t&&e&&e.__esModule?{get:()=>e.default,enumerable:!0}:{value:e,enumerable:!0})),e);var tn=ht((br,en)=>{(function(e,t){typeof br=="object"&&typeof en!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(br,function(){"use strict";function e(r){var n=!0,o=!1,i=null,a={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function c(E){return!!(E&&E!==document&&E.nodeName!=="HTML"&&E.nodeName!=="BODY"&&"classList"in E&&"contains"in E.classList)}function s(E){var Ce=E.type,ke=E.tagName;return!!(ke==="INPUT"&&a[Ce]&&!E.readOnly||ke==="TEXTAREA"&&!E.readOnly||E.isContentEditable)}function 
u(E){E.classList.contains("focus-visible")||(E.classList.add("focus-visible"),E.setAttribute("data-focus-visible-added",""))}function f(E){!E.hasAttribute("data-focus-visible-added")||(E.classList.remove("focus-visible"),E.removeAttribute("data-focus-visible-added"))}function l(E){E.metaKey||E.altKey||E.ctrlKey||(c(r.activeElement)&&u(r.activeElement),n=!0)}function p(E){n=!1}function d(E){!c(E.target)||(n||s(E.target))&&u(E.target)}function h(E){!c(E.target)||(E.target.classList.contains("focus-visible")||E.target.hasAttribute("data-focus-visible-added"))&&(o=!0,window.clearTimeout(i),i=window.setTimeout(function(){o=!1},100),f(E.target))}function b(E){document.visibilityState==="hidden"&&(o&&(n=!0),I())}function I(){document.addEventListener("mousemove",H),document.addEventListener("mousedown",H),document.addEventListener("mouseup",H),document.addEventListener("pointermove",H),document.addEventListener("pointerdown",H),document.addEventListener("pointerup",H),document.addEventListener("touchmove",H),document.addEventListener("touchstart",H),document.addEventListener("touchend",H)}function Y(){document.removeEventListener("mousemove",H),document.removeEventListener("mousedown",H),document.removeEventListener("mouseup",H),document.removeEventListener("pointermove",H),document.removeEventListener("pointerdown",H),document.removeEventListener("pointerup",H),document.removeEventListener("touchmove",H),document.removeEventListener("touchstart",H),document.removeEventListener("touchend",H)}function 
H(E){E.target.nodeName&&E.target.nodeName.toLowerCase()==="html"||(n=!1,Y())}document.addEventListener("keydown",l,!0),document.addEventListener("mousedown",p,!0),document.addEventListener("pointerdown",p,!0),document.addEventListener("touchstart",p,!0),document.addEventListener("visibilitychange",b,!0),I(),r.addEventListener("focus",d,!0),r.addEventListener("blur",h,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var rn=ht(vr=>{(function(e){var t=function(){try{return!!Symbol.iterator}catch(u){return!1}},r=t(),n=function(u){var f={next:function(){var l=u.shift();return{done:l===void 0,value:l}}};return r&&(f[Symbol.iterator]=function(){return f}),f},o=function(u){return encodeURIComponent(u).replace(/%20/g,"+")},i=function(u){return decodeURIComponent(String(u).replace(/\+/g," "))},a=function(){var u=function(l){Object.defineProperty(this,"_entries",{writable:!0,value:{}});var p=typeof l;if(p!=="undefined")if(p==="string")l!==""&&this._fromString(l);else if(l instanceof u){var d=this;l.forEach(function(Y,H){d.append(H,Y)})}else if(l!==null&&p==="object")if(Object.prototype.toString.call(l)==="[object Array]")for(var h=0;hd[0]?1:0}),u._entries&&(u._entries={});for(var l=0;l1?i(d[1]):"")}})})(typeof global!="undefined"?global:typeof window!="undefined"?window:typeof self!="undefined"?self:vr);(function(e){var t=function(){try{var o=new e.URL("b","http://a");return o.pathname="c 
d",o.href==="http://a/c%20d"&&o.searchParams}catch(i){return!1}},r=function(){var o=e.URL,i=function(s,u){typeof s!="string"&&(s=String(s)),u&&typeof u!="string"&&(u=String(u));var f=document,l;if(u&&(e.location===void 0||u!==e.location.href)){u=u.toLowerCase(),f=document.implementation.createHTMLDocument(""),l=f.createElement("base"),l.href=u,f.head.appendChild(l);try{if(l.href.indexOf(u)!==0)throw new Error(l.href)}catch(E){throw new Error("URL unable to set base "+u+" due to "+E)}}var p=f.createElement("a");p.href=s,l&&(f.body.appendChild(p),p.href=p.href);var d=f.createElement("input");if(d.type="url",d.value=s,p.protocol===":"||!/:/.test(p.href)||!d.checkValidity()&&!u)throw new TypeError("Invalid URL");Object.defineProperty(this,"_anchorElement",{value:p});var h=new e.URLSearchParams(this.search),b=!0,I=!0,Y=this;["append","delete","set"].forEach(function(E){var Ce=h[E];h[E]=function(){Ce.apply(h,arguments),b&&(I=!1,Y.search=h.toString(),I=!0)}}),Object.defineProperty(this,"searchParams",{value:h,enumerable:!0});var H=void 0;Object.defineProperty(this,"_updateSearchParams",{enumerable:!1,configurable:!1,writable:!1,value:function(){this.search!==H&&(H=this.search,I&&(b=!1,this.searchParams._fromString(this.search),b=!0))}})},a=i.prototype,c=function(s){Object.defineProperty(a,s,{get:function(){return this._anchorElement[s]},set:function(u){this._anchorElement[s]=u},enumerable:!0})};["hash","host","hostname","port","protocol"].forEach(function(s){c(s)}),Object.defineProperty(a,"search",{get:function(){return this._anchorElement.search},set:function(s){this._anchorElement.search=s,this._updateSearchParams()},enumerable:!0}),Object.defineProperties(a,{toString:{get:function(){var s=this;return function(){return s.href}}},href:{get:function(){return this._anchorElement.href.replace(/\?$/,"")},set:function(s){this._anchorElement.href=s,this._updateSearchParams()},enumerable:!0},pathname:{get:function(){return 
this._anchorElement.pathname.replace(/(^\/?)/,"/")},set:function(s){this._anchorElement.pathname=s},enumerable:!0},origin:{get:function(){var s={"http:":80,"https:":443,"ftp:":21}[this._anchorElement.protocol],u=this._anchorElement.port!=s&&this._anchorElement.port!=="";return this._anchorElement.protocol+"//"+this._anchorElement.hostname+(u?":"+this._anchorElement.port:"")},enumerable:!0},password:{get:function(){return""},set:function(s){},enumerable:!0},username:{get:function(){return""},set:function(s){},enumerable:!0}}),i.createObjectURL=function(s){return o.createObjectURL.apply(o,arguments)},i.revokeObjectURL=function(s){return o.revokeObjectURL.apply(o,arguments)},e.URL=i};if(t()||r(),e.location!==void 0&&!("origin"in e.location)){var n=function(){return e.location.protocol+"//"+e.location.hostname+(e.location.port?":"+e.location.port:"")};try{Object.defineProperty(e.location,"origin",{get:n,enumerable:!0})}catch(o){setInterval(function(){e.location.origin=n()},100)}}})(typeof global!="undefined"?global:typeof window!="undefined"?window:typeof self!="undefined"?self:vr)});var On=ht((ws,At)=>{/*! ***************************************************************************** -Copyright (c) Microsoft Corporation. - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH -REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY -AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM -LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR -OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR -PERFORMANCE OF THIS SOFTWARE. 
-***************************************************************************** */var nn,on,an,sn,cn,un,fn,pn,ln,Mt,gr,mn,dn,hn,Je,bn,vn,gn,yn,xn,wn,Sn,En,Lt;(function(e){var t=typeof global=="object"?global:typeof self=="object"?self:typeof this=="object"?this:{};typeof define=="function"&&define.amd?define("tslib",["exports"],function(n){e(r(t,r(n)))}):typeof At=="object"&&typeof At.exports=="object"?e(r(t,r(At.exports))):e(r(t));function r(n,o){return n!==t&&(typeof Object.create=="function"?Object.defineProperty(n,"__esModule",{value:!0}):n.__esModule=!0),function(i,a){return n[i]=o?o(i,a):a}}})(function(e){var t=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(n,o){n.__proto__=o}||function(n,o){for(var i in o)Object.prototype.hasOwnProperty.call(o,i)&&(n[i]=o[i])};nn=function(n,o){if(typeof o!="function"&&o!==null)throw new TypeError("Class extends value "+String(o)+" is not a constructor or null");t(n,o);function i(){this.constructor=n}n.prototype=o===null?Object.create(o):(i.prototype=o.prototype,new i)},on=Object.assign||function(n){for(var o,i=1,a=arguments.length;i=0;f--)(u=n[f])&&(s=(c<3?u(s):c>3?u(o,i,s):u(o,i))||s);return c>3&&s&&Object.defineProperty(o,i,s),s},cn=function(n,o){return function(i,a){o(i,a,n)}},un=function(n,o){if(typeof Reflect=="object"&&typeof Reflect.metadata=="function")return Reflect.metadata(n,o)},fn=function(n,o,i,a){function c(s){return s instanceof i?s:new i(function(u){u(s)})}return new(i||(i=Promise))(function(s,u){function f(d){try{p(a.next(d))}catch(h){u(h)}}function l(d){try{p(a.throw(d))}catch(h){u(h)}}function p(d){d.done?s(d.value):c(d.value).then(f,l)}p((a=a.apply(n,o||[])).next())})},pn=function(n,o){var i={label:0,sent:function(){if(s[0]&1)throw s[1];return s[1]},trys:[],ops:[]},a,c,s,u;return u={next:f(0),throw:f(1),return:f(2)},typeof Symbol=="function"&&(u[Symbol.iterator]=function(){return this}),u;function f(p){return function(d){return l([p,d])}}function l(p){if(a)throw new TypeError("Generator is 
already executing.");for(;i;)try{if(a=1,c&&(s=p[0]&2?c.return:p[0]?c.throw||((s=c.return)&&s.call(c),0):c.next)&&!(s=s.call(c,p[1])).done)return s;switch(c=0,s&&(p=[p[0]&2,s.value]),p[0]){case 0:case 1:s=p;break;case 4:return i.label++,{value:p[1],done:!1};case 5:i.label++,c=p[1],p=[0];continue;case 7:p=i.ops.pop(),i.trys.pop();continue;default:if(s=i.trys,!(s=s.length>0&&s[s.length-1])&&(p[0]===6||p[0]===2)){i=0;continue}if(p[0]===3&&(!s||p[1]>s[0]&&p[1]=n.length&&(n=void 0),{value:n&&n[a++],done:!n}}};throw new TypeError(o?"Object is not iterable.":"Symbol.iterator is not defined.")},gr=function(n,o){var i=typeof Symbol=="function"&&n[Symbol.iterator];if(!i)return n;var a=i.call(n),c,s=[],u;try{for(;(o===void 0||o-- >0)&&!(c=a.next()).done;)s.push(c.value)}catch(f){u={error:f}}finally{try{c&&!c.done&&(i=a.return)&&i.call(a)}finally{if(u)throw u.error}}return s},mn=function(){for(var n=[],o=0;o1||f(b,I)})})}function f(b,I){try{l(a[b](I))}catch(Y){h(s[0][3],Y)}}function l(b){b.value instanceof Je?Promise.resolve(b.value.v).then(p,d):h(s[0][2],b)}function p(b){f("next",b)}function d(b){f("throw",b)}function h(b,I){b(I),s.shift(),s.length&&f(s[0][0],s[0][1])}},vn=function(n){var o,i;return o={},a("next"),a("throw",function(c){throw c}),a("return"),o[Symbol.iterator]=function(){return this},o;function a(c,s){o[c]=n[c]?function(u){return(i=!i)?{value:Je(n[c](u)),done:c==="return"}:s?s(u):u}:s}},gn=function(n){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var o=n[Symbol.asyncIterator],i;return o?o.call(n):(n=typeof Mt=="function"?Mt(n):n[Symbol.iterator](),i={},a("next"),a("throw"),a("return"),i[Symbol.asyncIterator]=function(){return this},i);function a(s){i[s]=n[s]&&function(u){return new Promise(function(f,l){u=n[s](u),c(f,l,u.done,u.value)})}}function c(s,u,f,l){Promise.resolve(l).then(function(p){s({value:p,done:f})},u)}},yn=function(n,o){return Object.defineProperty?Object.defineProperty(n,"raw",{value:o}):n.raw=o,n};var 
r=Object.create?function(n,o){Object.defineProperty(n,"default",{enumerable:!0,value:o})}:function(n,o){n.default=o};xn=function(n){if(n&&n.__esModule)return n;var o={};if(n!=null)for(var i in n)i!=="default"&&Object.prototype.hasOwnProperty.call(n,i)&&Lt(o,n,i);return r(o,n),o},wn=function(n){return n&&n.__esModule?n:{default:n}},Sn=function(n,o,i,a){if(i==="a"&&!a)throw new TypeError("Private accessor was defined without a getter");if(typeof o=="function"?n!==o||!a:!o.has(n))throw new TypeError("Cannot read private member from an object whose class did not declare it");return i==="m"?a:i==="a"?a.call(n):a?a.value:o.get(n)},En=function(n,o,i,a,c){if(a==="m")throw new TypeError("Private method is not writable");if(a==="a"&&!c)throw new TypeError("Private accessor was defined without a setter");if(typeof o=="function"?n!==o||!c:!o.has(n))throw new TypeError("Cannot write private member to an object whose class did not declare it");return a==="a"?c.call(n,i):c?c.value=i:o.set(n,i),i},e("__extends",nn),e("__assign",on),e("__rest",an),e("__decorate",sn),e("__param",cn),e("__metadata",un),e("__awaiter",fn),e("__generator",pn),e("__exportStar",ln),e("__createBinding",Lt),e("__values",Mt),e("__read",gr),e("__spread",mn),e("__spreadArrays",dn),e("__spreadArray",hn),e("__await",Je),e("__asyncGenerator",bn),e("__asyncDelegator",vn),e("__asyncValues",gn),e("__makeTemplateObject",yn),e("__importStar",xn),e("__importDefault",wn),e("__classPrivateFieldGet",Sn),e("__classPrivateFieldSet",En)})});var zr=ht((Et,Vr)=>{/*! 
- * clipboard.js v2.0.8 - * https://clipboardjs.com/ - * - * Licensed MIT © Zeno Rocha - */(function(t,r){typeof Et=="object"&&typeof Vr=="object"?Vr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof Et=="object"?Et.ClipboardJS=r():t.ClipboardJS=r()})(Et,function(){return function(){var e={134:function(n,o,i){"use strict";i.d(o,{default:function(){return Vi}});var a=i(279),c=i.n(a),s=i(370),u=i.n(s),f=i(817),l=i.n(f);function p(L){return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?p=function(v){return typeof v}:p=function(v){return v&&typeof Symbol=="function"&&v.constructor===Symbol&&v!==Symbol.prototype?"symbol":typeof v},p(L)}function d(L,x){if(!(L instanceof x))throw new TypeError("Cannot call a class as a function")}function h(L,x){for(var v=0;v0&&arguments[0]!==void 0?arguments[0]:{};this.action=v.action,this.container=v.container,this.emitter=v.emitter,this.target=v.target,this.text=v.text,this.trigger=v.trigger,this.selectedText=""}},{key:"initSelection",value:function(){this.text?this.selectFake():this.target&&this.selectTarget()}},{key:"createFakeElement",value:function(){var v=document.documentElement.getAttribute("dir")==="rtl";this.fakeElem=document.createElement("textarea"),this.fakeElem.style.fontSize="12pt",this.fakeElem.style.border="0",this.fakeElem.style.padding="0",this.fakeElem.style.margin="0",this.fakeElem.style.position="absolute",this.fakeElem.style[v?"right":"left"]="-9999px";var $=window.pageYOffset||document.documentElement.scrollTop;return this.fakeElem.style.top="".concat($,"px"),this.fakeElem.setAttribute("readonly",""),this.fakeElem.value=this.text,this.fakeElem}},{key:"selectFake",value:function(){var v=this,$=this.createFakeElement();this.fakeHandlerCallback=function(){return 
v.removeFake()},this.fakeHandler=this.container.addEventListener("click",this.fakeHandlerCallback)||!0,this.container.appendChild($),this.selectedText=l()($),this.copyText(),this.removeFake()}},{key:"removeFake",value:function(){this.fakeHandler&&(this.container.removeEventListener("click",this.fakeHandlerCallback),this.fakeHandler=null,this.fakeHandlerCallback=null),this.fakeElem&&(this.container.removeChild(this.fakeElem),this.fakeElem=null)}},{key:"selectTarget",value:function(){this.selectedText=l()(this.target),this.copyText()}},{key:"copyText",value:function(){var v;try{v=document.execCommand(this.action)}catch($){v=!1}this.handleResult(v)}},{key:"handleResult",value:function(v){this.emitter.emit(v?"success":"error",{action:this.action,text:this.selectedText,trigger:this.trigger,clearSelection:this.clearSelection.bind(this)})}},{key:"clearSelection",value:function(){this.trigger&&this.trigger.focus(),document.activeElement.blur(),window.getSelection().removeAllRanges()}},{key:"destroy",value:function(){this.removeFake()}},{key:"action",set:function(){var v=arguments.length>0&&arguments[0]!==void 0?arguments[0]:"copy";if(this._action=v,this._action!=="copy"&&this._action!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"')},get:function(){return this._action}},{key:"target",set:function(v){if(v!==void 0)if(v&&p(v)==="object"&&v.nodeType===1){if(this.action==="copy"&&v.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. Please use "readonly" instead of "disabled" attribute');if(this.action==="cut"&&(v.hasAttribute("readonly")||v.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. 
You can't cut text from elements with "readonly" or "disabled" attributes`);this._target=v}else throw new Error('Invalid "target" value, use a valid Element')},get:function(){return this._target}}]),L}(),Y=I;function H(L){return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?H=function(v){return typeof v}:H=function(v){return v&&typeof Symbol=="function"&&v.constructor===Symbol&&v!==Symbol.prototype?"symbol":typeof v},H(L)}function E(L,x){if(!(L instanceof x))throw new TypeError("Cannot call a class as a function")}function Ce(L,x){for(var v=0;v0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof z.action=="function"?z.action:this.defaultAction,this.target=typeof z.target=="function"?z.target:this.defaultTarget,this.text=typeof z.text=="function"?z.text:this.defaultText,this.container=H(z.container)==="object"?z.container:document.body}},{key:"listenClick",value:function(z){var ee=this;this.listener=u()(z,"click",function(dt){return ee.onClick(dt)})}},{key:"onClick",value:function(z){var ee=z.delegateTarget||z.currentTarget;this.clipboardAction&&(this.clipboardAction=null),this.clipboardAction=new Y({action:this.action(ee),target:this.target(ee),text:this.text(ee),container:this.container,trigger:ee,emitter:this})}},{key:"defaultAction",value:function(z){return dr("action",z)}},{key:"defaultTarget",value:function(z){var ee=dr("target",z);if(ee)return document.querySelector(ee)}},{key:"defaultText",value:function(z){return dr("text",z)}},{key:"destroy",value:function(){this.listener.destroy(),this.clipboardAction&&(this.clipboardAction.destroy(),this.clipboardAction=null)}}],[{key:"isSupported",value:function(){var z=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],ee=typeof z=="string"?[z]:z,dt=!!document.queryCommandSupported;return ee.forEach(function(zi){dt=dt&&!!document.queryCommandSupported(zi)}),dt}}]),v}(c()),Vi=Di},828:function(n){var o=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var 
i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function a(c,s){for(;c&&c.nodeType!==o;){if(typeof c.matches=="function"&&c.matches(s))return c;c=c.parentNode}}n.exports=a},438:function(n,o,i){var a=i(828);function c(f,l,p,d,h){var b=u.apply(this,arguments);return f.addEventListener(p,b,h),{destroy:function(){f.removeEventListener(p,b,h)}}}function s(f,l,p,d,h){return typeof f.addEventListener=="function"?c.apply(null,arguments):typeof p=="function"?c.bind(null,document).apply(null,arguments):(typeof f=="string"&&(f=document.querySelectorAll(f)),Array.prototype.map.call(f,function(b){return c(b,l,p,d,h)}))}function u(f,l,p,d){return function(h){h.delegateTarget=a(h.target,l),h.delegateTarget&&d.call(f,h)}}n.exports=s},879:function(n,o){o.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},o.nodeList=function(i){var a=Object.prototype.toString.call(i);return i!==void 0&&(a==="[object NodeList]"||a==="[object HTMLCollection]")&&"length"in i&&(i.length===0||o.node(i[0]))},o.string=function(i){return typeof i=="string"||i instanceof String},o.fn=function(i){var a=Object.prototype.toString.call(i);return a==="[object Function]"}},370:function(n,o,i){var a=i(879),c=i(438);function s(p,d,h){if(!p&&!d&&!h)throw new Error("Missing required arguments");if(!a.string(d))throw new TypeError("Second argument must be a String");if(!a.fn(h))throw new TypeError("Third argument must be a Function");if(a.node(p))return u(p,d,h);if(a.nodeList(p))return f(p,d,h);if(a.string(p))return l(p,d,h);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function u(p,d,h){return p.addEventListener(d,h),{destroy:function(){p.removeEventListener(d,h)}}}function f(p,d,h){return 
Array.prototype.forEach.call(p,function(b){b.addEventListener(d,h)}),{destroy:function(){Array.prototype.forEach.call(p,function(b){b.removeEventListener(d,h)})}}}function l(p,d,h){return c(document.body,p,d,h)}n.exports=s},817:function(n){function o(i){var a;if(i.nodeName==="SELECT")i.focus(),a=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var c=i.hasAttribute("readonly");c||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),c||i.removeAttribute("readonly"),a=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var s=window.getSelection(),u=document.createRange();u.selectNodeContents(i),s.removeAllRanges(),s.addRange(u),a=s.toString()}return a}n.exports=o},279:function(n){function o(){}o.prototype={on:function(i,a,c){var s=this.e||(this.e={});return(s[i]||(s[i]=[])).push({fn:a,ctx:c}),this},once:function(i,a,c){var s=this;function u(){s.off(i,u),a.apply(c,arguments)}return u._=a,this.on(i,u,c)},emit:function(i){var a=[].slice.call(arguments,1),c=((this.e||(this.e={}))[i]||[]).slice(),s=0,u=c.length;for(s;s{"use strict";/*! 
- * escape-html - * Copyright(c) 2012-2013 TJ Holowaychuk - * Copyright(c) 2015 Andreas Lubbe - * Copyright(c) 2015 Tiancheng "Timothy" Gu - * MIT Licensed - */var ns=/["'&<>]/;ui.exports=os;function os(e){var t=""+e,r=ns.exec(t);if(!r)return t;var n,o="",i=0,a=0;for(i=r.index;i0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var n=this,o=n.hasError,i=n.isStopped,a=n.observers;return o||i?yr:(a.push(r),new Te(function(){return Re(a,r)}))},t.prototype._checkFinalizedStatuses=function(r){var n=this,o=n.hasError,i=n.thrownError,a=n.isStopped;o?r.error(i):a&&r.complete()},t.prototype.asObservable=function(){var r=new j;return r.source=this,r},t.create=function(r,n){return new jn(r,n)},t}(j);var jn=function(e){Z(t,e);function t(r,n){var o=e.call(this)||this;return o.destination=r,o.source=n,o}return t.prototype.next=function(r){var n,o;(o=(n=this.destination)===null||n===void 0?void 0:n.next)===null||o===void 0||o.call(n,r)},t.prototype.error=function(r){var n,o;(o=(n=this.destination)===null||n===void 0?void 0:n.error)===null||o===void 0||o.call(n,r)},t.prototype.complete=function(){var r,n;(n=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||n===void 0||n.call(r)},t.prototype._subscribe=function(r){var n,o;return(o=(n=this.source)===null||n===void 0?void 0:n.subscribe(r))!==null&&o!==void 0?o:yr},t}(S);var vt={now:function(){return(vt.delegate||Date).now()},delegate:void 0};var gt=function(e){Z(t,e);function t(r,n,o){r===void 0&&(r=1/0),n===void 0&&(n=1/0),o===void 0&&(o=vt);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=n,i._timestampProvider=o,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=n===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,n),i}return 
t.prototype.next=function(r){var n=this,o=n.isStopped,i=n._buffer,a=n._infiniteTimeWindow,c=n._timestampProvider,s=n._windowTime;o||(i.push(r),!a&&i.push(c.now()+s)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var n=this._innerSubscribe(r),o=this,i=o._infiniteTimeWindow,a=o._buffer,c=a.slice(),s=0;s0?e.prototype.requestAsyncId.call(this,r,n,o):(r.actions.push(this),r._scheduled||(r._scheduled=rt.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,n,o){if(o===void 0&&(o=0),o!=null&&o>0||o==null&&this.delay>0)return e.prototype.recycleAsyncId.call(this,r,n,o);r.actions.some(function(i){return i.id===n})||(rt.cancelAnimationFrame(n),r._scheduled=void 0)},t}($t);var Dn=function(e){Z(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var n=this._scheduled;this._scheduled=void 0;var o=this.actions,i;r=r||o.shift();do if(i=r.execute(r.state,r.delay))break;while((r=o[0])&&r.id===n&&o.shift());if(this._active=!1,i){for(;(r=o[0])&&r.id===n&&o.shift();)r.unsubscribe();throw i}},t}(Ft);var Se=new Dn(Wn);var N=new j(function(e){return e.complete()});function jt(e){return e&&O(e.schedule)}function _r(e){return e[e.length-1]}function Ie(e){return O(_r(e))?e.pop():void 0}function ge(e){return jt(_r(e))?e.pop():void 0}function Ut(e,t){return typeof _r(e)=="number"?e.pop():t}var nt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Wt(e){return O(e==null?void 0:e.then)}function Dt(e){return O(e[tt])}function Vt(e){return Symbol.asyncIterator&&O(e==null?void 0:e[Symbol.asyncIterator])}function zt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function ea(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var Nt=ea();function qt(e){return O(e==null?void 0:e[Nt])}function Qt(e){return Mn(this,arguments,function(){var r,n,o,i;return Ct(this,function(a){switch(a.label){case 0:r=e.getReader(),a.label=1;case 1:a.trys.push([1,,9,10]),a.label=2;case 2:return[4,kt(r.read())];case 3:return n=a.sent(),o=n.value,i=n.done,i?[4,kt(void 0)]:[3,5];case 4:return[2,a.sent()];case 5:return[4,kt(o)];case 6:return[4,a.sent()];case 7:return a.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function Kt(e){return O(e==null?void 0:e.getReader)}function D(e){if(e instanceof j)return e;if(e!=null){if(Dt(e))return ta(e);if(nt(e))return ra(e);if(Wt(e))return na(e);if(Vt(e))return Vn(e);if(qt(e))return oa(e);if(Kt(e))return ia(e)}throw zt(e)}function ta(e){return new j(function(t){var r=e[tt]();if(O(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function ra(e){return new j(function(t){for(var r=0;r=2,!0))}function fe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new S}:t,n=e.resetOnError,o=n===void 0?!0:n,i=e.resetOnComplete,a=i===void 0?!0:i,c=e.resetOnRefCountZero,s=c===void 0?!0:c;return function(u){var f=null,l=null,p=null,d=0,h=!1,b=!1,I=function(){l==null||l.unsubscribe(),l=null},Y=function(){I(),f=p=null,h=b=!1},H=function(){var E=f;Y(),E==null||E.unsubscribe()};return g(function(E,Ce){d++,!b&&!h&&I();var ke=p=p!=null?p:r();Ce.add(function(){d--,d===0&&!b&&!h&&(l=Hr(H,s))}),ke.subscribe(Ce),f||(f=new et({next:function(Ge){return ke.next(Ge)},error:function(Ge){b=!0,I(),l=Hr(Y,o,Ge),ke.error(Ge)},complete:function(){h=!0,I(),l=Hr(Y,a),ke.complete()}}),te(E).subscribe(f))})(u)}}function Hr(e,t){for(var r=[],n=2;ne.next(document)),e}function G(e,t=document){return 
Array.from(t.querySelectorAll(e))}function K(e,t=document){let r=ce(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function ce(e,t=document){return t.querySelector(e)||void 0}function Ue(){return document.activeElement instanceof HTMLElement&&document.activeElement||void 0}function Xt(e){return A(w(document.body,"focusin"),w(document.body,"focusout")).pipe(Ke(1),m(()=>{let t=Ue();return typeof t!="undefined"?e.contains(t):!1}),V(e===Ue()),Q())}function We(e){return{x:e.offsetLeft,y:e.offsetTop}}function co(e){return A(w(window,"load"),w(window,"resize")).pipe(He(0,Se),m(()=>We(e)),V(We(e)))}function uo(e){return{x:e.scrollLeft,y:e.scrollTop}}function Zt(e){return A(w(e,"scroll"),w(window,"resize")).pipe(He(0,Se),m(()=>uo(e)),V(uo(e)))}var fo=function(){if(typeof Map!="undefined")return Map;function e(t,r){var n=-1;return t.some(function(o,i){return o[0]===r?(n=i,!0):!1}),n}return function(){function t(){this.__entries__=[]}return Object.defineProperty(t.prototype,"size",{get:function(){return this.__entries__.length},enumerable:!0,configurable:!0}),t.prototype.get=function(r){var n=e(this.__entries__,r),o=this.__entries__[n];return o&&o[1]},t.prototype.set=function(r,n){var o=e(this.__entries__,r);~o?this.__entries__[o][1]=n:this.__entries__.push([r,n])},t.prototype.delete=function(r){var n=this.__entries__,o=e(n,r);~o&&n.splice(o,1)},t.prototype.has=function(r){return!!~e(this.__entries__,r)},t.prototype.clear=function(){this.__entries__.splice(0)},t.prototype.forEach=function(r,n){n===void 0&&(n=null);for(var o=0,i=this.__entries__;o0},e.prototype.connect_=function(){!Ur||this.connected_||(document.addEventListener("transitionend",this.onTransitionEnd_),window.addEventListener("resize",this.refresh),Ma?(this.mutationsObserver_=new 
MutationObserver(this.refresh),this.mutationsObserver_.observe(document,{attributes:!0,childList:!0,characterData:!0,subtree:!0})):(document.addEventListener("DOMSubtreeModified",this.refresh),this.mutationEventsAdded_=!0),this.connected_=!0)},e.prototype.disconnect_=function(){!Ur||!this.connected_||(document.removeEventListener("transitionend",this.onTransitionEnd_),window.removeEventListener("resize",this.refresh),this.mutationsObserver_&&this.mutationsObserver_.disconnect(),this.mutationEventsAdded_&&document.removeEventListener("DOMSubtreeModified",this.refresh),this.mutationsObserver_=null,this.mutationEventsAdded_=!1,this.connected_=!1)},e.prototype.onTransitionEnd_=function(t){var r=t.propertyName,n=r===void 0?"":r,o=_a.some(function(i){return!!~n.indexOf(i)});o&&this.refresh()},e.getInstance=function(){return this.instance_||(this.instance_=new e),this.instance_},e.instance_=null,e}(),po=function(e,t){for(var r=0,n=Object.keys(t);r0},e}(),ho=typeof WeakMap!="undefined"?new WeakMap:new fo,bo=function(){function e(t){if(!(this instanceof e))throw new TypeError("Cannot call a class as a function.");if(!arguments.length)throw new TypeError("1 argument required, but only 0 present.");var r=La.getInstance(),n=new ja(t,r,this);ho.set(this,n)}return e}();["observe","unobserve","disconnect"].forEach(function(e){bo.prototype[e]=function(){var t;return(t=ho.get(this))[e].apply(t,arguments)}});var Ua=function(){return typeof er.ResizeObserver!="undefined"?er.ResizeObserver:bo}(),vo=Ua;var go=new S,Wa=U(()=>k(new vo(e=>{for(let t of e)go.next(t)}))).pipe(_(e=>A(ye,k(e)).pipe(C(()=>e.disconnect()))),J(1));function _e(e){return{width:e.offsetWidth,height:e.offsetHeight}}function he(e){return Wa.pipe(T(t=>t.observe(e)),_(t=>go.pipe(M(({target:r})=>r===e),C(()=>t.unobserve(e)),m(()=>_e(e)))),V(_e(e)))}function nr(e){return{width:e.scrollWidth,height:e.scrollHeight}}var Da=new S,rv=U(()=>k(new IntersectionObserver(e=>{for(let t of 
e)Da.next(t)},{threshold:1}))).pipe(_(e=>A(ye,k(e)).pipe(C(()=>e.disconnect()))),J(1));function yo(e,t=16){return Zt(e).pipe(m(({y:r})=>{let n=_e(e),o=nr(e);return r>=o.height-n.height-t}),Q())}var or={drawer:K("[data-md-toggle=drawer]"),search:K("[data-md-toggle=search]")};function xo(e){return or[e].checked}function De(e,t){or[e].checked!==t&&or[e].click()}function ir(e){let t=or[e];return w(t,"change").pipe(m(()=>t.checked),V(t.checked))}function Va(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function wo(){return w(window,"keydown").pipe(M(e=>!(e.metaKey||e.ctrlKey)),m(e=>({mode:xo("search")?"search":"global",type:e.key,claim(){e.preventDefault(),e.stopPropagation()}})),M(({mode:e,type:t})=>{if(e==="global"){let r=Ue();if(typeof r!="undefined")return!Va(r,t)}return!0}),fe())}function Me(){return new URL(location.href)}function So(e){location.href=e.href}function Eo(){return new S}function Oo(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)Oo(e,r)}function R(e,t,...r){let n=document.createElement(e);if(t)for(let o of Object.keys(t))typeof t[o]!="boolean"?n.setAttribute(o,t[o]):t[o]&&n.setAttribute(o,"");for(let o of r)Oo(n,o);return n}function To(e,t){let r=t;if(e.length>r){for(;e[r]!==" "&&--r>0;);return`${e.substring(0,r)}...`}return e}function ar(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function _o(){return location.hash.substring(1)}function Mo(e){let t=R("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function za(){return w(window,"hashchange").pipe(m(_o),V(_o()),M(e=>e.length>0),J(1))}function Lo(){return za().pipe(m(e=>ce(`[id="${e}"]`)),M(e=>typeof e!="undefined"))}function Wr(e){let t=matchMedia(e);return 
Gt(r=>t.addListener(()=>r(t.matches))).pipe(V(t.matches))}function Ao(){let e=matchMedia("print");return A(w(window,"beforeprint").pipe(re(!0)),w(window,"afterprint").pipe(re(!1))).pipe(V(e.matches))}function Dr(e,t){return e.pipe(_(r=>r?t():N))}function sr(e,t={credentials:"same-origin"}){return te(fetch(`${e}`,t)).pipe(M(r=>r.status===200),je(()=>N))}function Le(e,t){return sr(e,t).pipe(_(r=>r.json()),J(1))}function Co(e,t){let r=new DOMParser;return sr(e,t).pipe(_(n=>n.text()),m(n=>r.parseFromString(n,"text/xml")),J(1))}function ko(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function Ro(){return A(w(window,"scroll",{passive:!0}),w(window,"resize",{passive:!0})).pipe(m(ko),V(ko()))}function Ho(){return{width:innerWidth,height:innerHeight}}function Po(){return w(window,"resize",{passive:!0}).pipe(m(Ho),V(Ho()))}function Io(){return q([Ro(),Po()]).pipe(m(([e,t])=>({offset:e,size:t})),J(1))}function cr(e,{viewport$:t,header$:r}){let n=t.pipe(B("size")),o=q([n,r]).pipe(m(()=>We(e)));return q([r,t,o]).pipe(m(([{height:i},{offset:a,size:c},{x:s,y:u}])=>({offset:{x:a.x-s,y:a.y-u+i},size:c})))}function $o(e,{tx$:t}){let r=w(e,"message").pipe(m(({data:n})=>n));return t.pipe(St(()=>r,{leading:!0,trailing:!0}),T(n=>e.postMessage(n)),Pr(r),fe())}var Na=K("#__config"),ft=JSON.parse(Na.textContent);ft.base=`${new URL(ft.base,Me())}`;function be(){return ft}function ae(e){return ft.features.includes(e)}function X(e,t){return typeof t!="undefined"?ft.translations[e].replace("#",t.toString()):ft.translations[e]}function Ae(e,t=document){return K(`[data-md-component=${e}]`,t)}function ne(e,t=document){return G(`[data-md-component=${e}]`,t)}var Qo=ze(zr());function Fo(e){return R("aside",{class:"md-annotation",tabIndex:0},R("div",{class:"md-annotation__inner md-tooltip"},R("div",{class:"md-tooltip__inner md-typeset"})),R("span",{class:"md-annotation__index"},R("span",{"data-md-annotation-id":e})))}function jo(e){return R("button",{class:"md-clipboard 
md-icon",title:X("clipboard.copy"),"data-clipboard-target":`#${e} > code`})}function Nr(e,t){let r=t&2,n=t&1,o=Object.keys(e.terms).filter(a=>!e.terms[a]).reduce((a,c)=>[...a,R("del",null,c)," "],[]).slice(0,-1),i=new URL(e.location);return ae("search.highlight")&&i.searchParams.set("h",Object.entries(e.terms).filter(([,a])=>a).reduce((a,[c])=>`${a} ${c}`.trim(),"")),R("a",{href:`${i}`,class:"md-search-result__link",tabIndex:-1},R("article",{class:["md-search-result__article",...r?["md-search-result__article--document"]:[]].join(" "),"data-md-score":e.score.toFixed(2)},r>0&&R("div",{class:"md-search-result__icon md-icon"}),R("h1",{class:"md-search-result__title"},e.title),n>0&&e.text.length>0&&R("p",{class:"md-search-result__teaser"},To(e.text,320)),n>0&&o.length>0&&R("p",{class:"md-search-result__terms"},X("search.result.term.missing"),": ",o)))}function Uo(e){let t=e[0].score,r=[...e],n=r.findIndex(u=>!u.location.includes("#")),[o]=r.splice(n,1),i=r.findIndex(u=>u.scoreNr(u,1)),...c.length?[R("details",{class:"md-search-result__more"},R("summary",{tabIndex:-1},c.length>0&&c.length===1?X("search.result.more.one"):X("search.result.more.other",c.length)),c.map(u=>Nr(u,1)))]:[]];return R("li",{class:"md-search-result__item"},s)}function Wo(e){return R("ul",{class:"md-source__facts"},Object.entries(e).map(([t,r])=>R("li",{class:`md-source__fact md-source__fact--${t}`},typeof r=="number"?ar(r):r)))}function Do(e){return R("div",{class:"md-typeset__scrollwrap"},R("div",{class:"md-typeset__table"},e))}function qa(e){let t=be(),r=new URL(`../${e.version}/`,t.base);return R("li",{class:"md-version__item"},R("a",{href:r.toString(),class:"md-version__link"},e.title))}function Vo(e,t){return R("div",{class:"md-version"},R("button",{class:"md-version__current","aria-label":X("select.version.title")},t.title),R("ul",{class:"md-version__list"},e.map(qa)))}function Qa(e,t){let 
r=U(()=>q([co(e),Zt(t)])).pipe(m(([{x:n,y:o},i])=>{let{width:a}=_e(e);return{x:n-i.x+a/2,y:o-i.y}}));return Xt(e).pipe(_(n=>r.pipe(m(o=>({active:n,offset:o})),le(+!n||1/0))))}function zo(e,t){return U(()=>{let r=new S;r.subscribe({next({offset:i}){e.style.setProperty("--md-tooltip-x",`${i.x}px`),e.style.setProperty("--md-tooltip-y",`${i.y}px`)},complete(){e.style.removeProperty("--md-tooltip-x"),e.style.removeProperty("--md-tooltip-y")}}),r.pipe(Fr(500,Se),m(()=>t.getBoundingClientRect()),m(({x:i})=>i)).subscribe({next(i){i?e.style.setProperty("--md-tooltip-0",`${-i}px`):e.style.removeProperty("--md-tooltip-0")},complete(){e.style.removeProperty("--md-tooltip-0")}});let n=K(":scope > :last-child",e),o=w(n,"mousedown",{once:!0});return r.pipe(_(({active:i})=>i?o:N),T(i=>i.preventDefault())).subscribe(()=>e.blur()),Qa(e,t).pipe(T(i=>r.next(i)),C(()=>r.complete()),m(i=>F({ref:e},i)))})}function Ka(e){let t=[];for(let r of G(".c, .c1, .cm",e)){let n,o=r.firstChild;for(;n=/\((\d+)\)/.exec(o.textContent);){let i=o.splitText(n.index);o=i.splitText(n[0].length),t.push(i)}}return t}function No(e,t){t.append(...Array.from(e.childNodes))}function qo(e,t,{print$:r}){let n=new Map;for(let o of Ka(t)){let[,i]=o.textContent.match(/\((\d+)\)/);ce(`li:nth-child(${i})`,e)&&(n.set(+i,Fo(+i)),o.replaceWith(n.get(+i)))}return n.size===0?N:U(()=>{let o=new S;return r.pipe(se(o.pipe(ue(1)))).subscribe(i=>{e.hidden=!i;for(let[a,c]of n){let s=K(".md-typeset",c),u=K(`li:nth-child(${a})`,e);i?No(s,u):No(u,s)}}),A(...[...n].map(([,i])=>zo(i,t))).pipe(C(()=>o.complete()),fe())})}var Ya=0;function Ko(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return Ko(t)}}function Yo(e){return he(e).pipe(m(({width:t})=>({scrollable:nr(e).width>t})),B("scrollable"))}function Bo(e,t){let{matches:r}=matchMedia("(hover)");return U(()=>{let n=new 
S;if(n.subscribe(({scrollable:i})=>{i&&r?e.setAttribute("tabindex","0"):e.removeAttribute("tabindex")}),Qo.default.isSupported()){let i=e.closest("pre");i.id=`__code_${++Ya}`,i.insertBefore(jo(i.id),e)}let o=e.closest([":not(td):not(.code) > .highlight",".highlighttable"].join(", "));if(o instanceof HTMLElement){let i=Ko(o);if(typeof i!="undefined"&&(o.classList.contains("annotate")||ae("content.code.annotate"))){let a=qo(i,e,t);return Yo(e).pipe(T(c=>n.next(c)),C(()=>n.complete()),m(c=>F({ref:e},c)),Be(he(o).pipe(se(n.pipe(ue(1))),m(({width:c,height:s})=>c&&s),Q(),_(c=>c?a:N))))}}return Yo(e).pipe(T(i=>n.next(i)),C(()=>n.complete()),m(i=>F({ref:e},i)))})}function Ba(e,{target$:t,print$:r}){let n=!0;return A(t.pipe(m(o=>o.closest("details:not([open])")),M(o=>e===o),re({action:"open",reveal:!0})),r.pipe(M(o=>o||!n),T(()=>n=e.open),m(o=>({action:o?"open":"close"}))))}function Go(e,t){return U(()=>{let r=new S;return r.subscribe(({action:n,reveal:o})=>{n==="open"?e.setAttribute("open",""):e.removeAttribute("open"),o&&e.scrollIntoView()}),Ba(e,t).pipe(T(n=>r.next(n)),C(()=>r.complete()),m(n=>F({ref:e},n)))})}var Jo=R("table");function Xo(e){return e.replaceWith(Jo),Jo.replaceWith(Do(e)),k({ref:e})}function Ga(e){let t=G(":scope > input",e);return A(...t.map(r=>w(r,"change").pipe(re({active:K(`label[for=${r.id}]`)})))).pipe(V({active:K(`label[for=${t[0].id}]`)}))}function Zo(e){let t=K(".tabbed-labels",e);return U(()=>{let r=new S;return q([r,he(e)]).pipe(He(1,Se),se(r.pipe(ue(1)))).subscribe({next([{active:n}]){let o=We(n),{width:i}=_e(n);e.style.setProperty("--md-indicator-x",`${o.x}px`),e.style.setProperty("--md-indicator-width",`${i}px`),t.scrollTo({behavior:"smooth",left:o.x})},complete(){e.style.removeProperty("--md-indicator-x"),e.style.removeProperty("--md-indicator-width")}}),Ga(e).pipe(T(n=>r.next(n)),C(()=>r.complete()),m(n=>F({ref:e},n)))})}function ei(e,{target$:t,print$:r}){return A(...G("pre > 
code",e).map(n=>Bo(n,{print$:r})),...G("table:not([class])",e).map(n=>Xo(n)),...G("details",e).map(n=>Go(n,{target$:t,print$:r})),...G("[data-tabs]",e).map(n=>Zo(n)))}function Ja(e,{alert$:t}){return t.pipe(_(r=>A(k(!0),k(!1).pipe(Pe(2e3))).pipe(m(n=>({message:r,active:n})))))}function ti(e,t){let r=K(".md-typeset",e);return U(()=>{let n=new S;return n.subscribe(({message:o,active:i})=>{r.textContent=o,i?e.setAttribute("data-md-state","open"):e.removeAttribute("data-md-state")}),Ja(e,t).pipe(T(o=>n.next(o)),C(()=>n.complete()),m(o=>F({ref:e},o)))})}function Xa({viewport$:e}){if(!ae("header.autohide"))return k(!1);let t=e.pipe(m(({offset:{y:o}})=>o),Ee(2,1),m(([o,i])=>[oMath.abs(i-o.y)>100),m(([,[o]])=>o),Q()),n=ir("search");return q([e,n]).pipe(m(([{offset:o},i])=>o.y>400&&!i),Q(),_(o=>o?r:k(!1)),V(!1))}function ri(e,t){return U(()=>{let r=getComputedStyle(e);return k(r.position==="sticky"||r.position==="-webkit-sticky")}).pipe(at(he(e),Xa(t)),m(([r,{height:n},o])=>({height:r?n:0,sticky:r,hidden:o})),Q((r,n)=>r.sticky===n.sticky&&r.height===n.height&&r.hidden===n.hidden),J(1))}function ni(e,{header$:t,main$:r}){return U(()=>{let n=new S;return n.pipe(B("active"),at(t)).subscribe(([{active:o},{hidden:i}])=>{o?e.setAttribute("data-md-state",i?"hidden":"shadow"):e.removeAttribute("data-md-state")}),r.subscribe(n),t.pipe(se(n.pipe(ue(1))),m(o=>F({ref:e},o)))})}function Za(e,{viewport$:t,header$:r}){return cr(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:n}})=>{let{height:o}=_e(e);return{active:n>=o}}),B("active"))}function oi(e,t){return U(()=>{let r=new S;r.subscribe(({active:o})=>{o?e.setAttribute("data-md-state","active"):e.removeAttribute("data-md-state")});let n=ce("article h1");return typeof n=="undefined"?N:Za(n,t).pipe(T(o=>r.next(o)),C(()=>r.complete()),m(o=>F({ref:e},o)))})}function ii(e,{viewport$:t,header$:r}){let n=r.pipe(m(({height:i})=>i),Q()),o=n.pipe(_(()=>he(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),B("bottom"))));return 
q([n,o,t]).pipe(m(([i,{top:a,bottom:c},{offset:{y:s},size:{height:u}}])=>(u=Math.max(0,u-Math.max(0,a-s,i)-Math.max(0,u+s-c)),{offset:a-i,height:u,active:a-i<=s})),Q((i,a)=>i.offset===a.offset&&i.height===a.height&&i.active===a.active))}function es(e){let t=__md_get("__palette")||{index:e.findIndex(r=>matchMedia(r.getAttribute("data-md-color-media")).matches)};return k(...e).pipe(ie(r=>w(r,"change").pipe(re(r))),V(e[Math.max(0,t.index)]),m(r=>({index:e.indexOf(r),color:{scheme:r.getAttribute("data-md-color-scheme"),primary:r.getAttribute("data-md-color-primary"),accent:r.getAttribute("data-md-color-accent")}})),J(1))}function ai(e){return U(()=>{let t=new S;t.subscribe(n=>{for(let[o,i]of Object.entries(n.color))document.body.setAttribute(`data-md-color-${o}`,i);for(let o=0;ot.next(n)),C(()=>t.complete()),m(n=>F({ref:e},n)))})}var qr=ze(zr());function ts(e){e.setAttribute("data-md-copying","");let t=e.innerText;return e.removeAttribute("data-md-copying"),t}function si({alert$:e}){qr.default.isSupported()&&new j(t=>{new qr.default("[data-clipboard-target], [data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||ts(K(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(T(t=>{t.trigger.focus()}),re(X("clipboard.copied"))).subscribe(e)}function rs(e){if(e.length<2)return e;let[t,r]=e.sort((i,a)=>i.length-a.length).map(i=>i.replace(/[^/]+$/,"")),n=0;if(t===r)n=t.length;else for(;t.charCodeAt(n)===r.charCodeAt(n);)n++;let o=be();return e.map(i=>i.replace(t.slice(0,n),o.base))}function ci({document$:e,location$:t,viewport$:r}){let n=be();if(location.protocol==="file:")return;"scrollRestoration"in history&&(history.scrollRestoration="manual",w(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}));let o=ce("link[rel=icon]");typeof o!="undefined"&&(o.href=o.href);let i=Co(new 
URL("sitemap.xml",n.base)).pipe(m(u=>rs(G("loc",u).map(f=>f.textContent))),_(u=>w(document.body,"click").pipe(M(f=>!f.metaKey&&!f.ctrlKey),_(f=>{if(f.target instanceof Element){let l=f.target.closest("a");if(l&&!l.target){let p=new URL(l.href);if(p.search="",p.hash="",p.pathname!==location.pathname&&u.includes(p.toString()))return f.preventDefault(),k({url:new URL(l.href)})}}return ye}))),fe()),a=w(window,"popstate").pipe(M(u=>u.state!==null),m(u=>({url:new URL(location.href),offset:u.state})),fe());A(i,a).pipe(Q((u,f)=>u.url.href===f.url.href),m(({url:u})=>u)).subscribe(t);let c=t.pipe(B("pathname"),_(u=>sr(u.href).pipe(je(()=>(So(u),ye)))),fe());i.pipe(st(c)).subscribe(({url:u})=>{history.pushState({},"",`${u}`)});let s=new DOMParser;c.pipe(_(u=>u.text()),m(u=>s.parseFromString(u,"text/html"))).subscribe(e),e.pipe(ct(1)).subscribe(u=>{for(let f of["title","link[rel=canonical]","meta[name=author]","meta[name=description]","[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=logo]","[data-md-component=skip]",...ae("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let l=ce(f),p=ce(f,u);typeof l!="undefined"&&typeof p!="undefined"&&l.replaceWith(p)}}),e.pipe(ct(1),m(()=>Ae("container")),_(u=>k(...G("script",u))),Ar(u=>{let f=R("script");if(u.src){for(let l of u.getAttributeNames())f.setAttribute(l,u.getAttribute(l));return u.replaceWith(f),new j(l=>{f.onload=()=>l.complete()})}else return f.textContent=u.textContent,u.replaceWith(f),N})).subscribe(),A(i,a).pipe(st(e)).subscribe(({url:u,offset:f})=>{u.hash&&!f?Mo(u.hash):window.scrollTo(0,(f==null?void 0:f.y)||0)}),r.pipe(wt(i),Ke(250),B("offset")).subscribe(({offset:u})=>{history.replaceState(u,"")}),A(i,a).pipe(Ee(2,1),M(([u,f])=>u.url.pathname===f.url.pathname),m(([,u])=>u)).subscribe(({offset:u})=>{window.scrollTo(0,(u==null?void 0:u.y)||0)})}var is=ze(Qr());var fi=ze(Qr());function Kr(e,t){let r=new 
RegExp(e.separator,"img"),n=(o,i,a)=>`${i}${a}`;return o=>{o=o.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator})(${o.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return a=>(t?(0,fi.default)(a):a).replace(i,n).replace(/<\/mark>(\s+)]*>/img,"$1")}}function pi(e){return e.split(/"([^"]+)"/g).map((t,r)=>r&1?t.replace(/^\b|^(?![^\x00-\x7F]|$)|\s+/g," +"):t).join("").replace(/"|(?:^|\s+)[*+\-:^~]+(?=\s+|$)/g,"").trim()}function pt(e){return e.type===1}function li(e){return e.type===2}function lt(e){return e.type===3}function ss({config:e,docs:t}){e.lang.length===1&&e.lang[0]==="en"&&(e.lang=[X("search.config.lang")]),e.separator==="[\\s\\-]+"&&(e.separator=X("search.config.separator"));let n={pipeline:X("search.config.pipeline").split(/\s*,\s*/).filter(Boolean),suggestions:ae("search.suggest")};return{config:e,docs:t,options:n}}function mi(e,t){let r=be(),n=new Worker(e),o=new S,i=$o(n,{tx$:o}).pipe(m(a=>{if(lt(a))for(let c of a.data.items)for(let s of c)s.location=`${new URL(s.location,r.base)}`;return a}),fe());return te(t).pipe(m(a=>({type:0,data:ss(a)}))).subscribe(o.next.bind(o)),{tx$:o,rx$:i}}function di(){let e=be(),t=Le(new URL("../versions.json",e.base)),r=t.pipe(m(n=>{let[,o]=e.base.match(/([^/]+)\/?$/);return n.find(({version:i,aliases:a})=>i===o||a.includes(o))||n[0]}));q([t,r]).subscribe(([n,o])=>{var a;if(K(".md-header__topic").appendChild(Vo(n,o)),__md_get("__outdated",sessionStorage)===null){let c=((a=e.version)==null?void 0:a.default)||"latest",s=!o.aliases.includes(c);if(__md_set("__outdated",s,sessionStorage),s)for(let u of ne("outdated"))u.hidden=!1}})}function cs(e,{rx$:t}){let r=(__search==null?void 0:__search.transform)||pi,{searchParams:n}=Me();n.has("q")&&De("search",!0);let o=t.pipe(M(pt),le(1),m(()=>n.get("q")||""));o.subscribe(c=>{c&&(e.value=c)});let i=Xt(e),a=A(w(e,"keyup"),w(e,"focus").pipe(Pe(1)),o).pipe(m(()=>r(e.value)),V(""),Q());return 
q([a,i]).pipe(m(([c,s])=>({value:c,focus:s})),J(1))}function hi(e,{tx$:t,rx$:r}){let n=new S;return n.pipe(B("value"),m(({value:o})=>({type:2,data:o}))).subscribe(t.next.bind(t)),n.pipe(B("focus")).subscribe(({focus:o})=>{o?(De("search",o),e.placeholder=""):e.placeholder=X("search.placeholder")}),w(e.form,"reset").pipe(se(n.pipe(ue(1)))).subscribe(()=>e.focus()),cs(e,{tx$:t,rx$:r}).pipe(T(o=>n.next(o)),C(()=>n.complete()),m(o=>F({ref:e},o)))}function bi(e,{rx$:t},{query$:r}){let n=new S,o=yo(e.parentElement).pipe(M(Boolean)),i=K(":scope > :first-child",e),a=K(":scope > :last-child",e),c=t.pipe(M(pt),le(1));return n.pipe(Oe(r),wt(c)).subscribe(([{items:u},{value:f}])=>{if(f)switch(u.length){case 0:i.textContent=X("search.result.none");break;case 1:i.textContent=X("search.result.one");break;default:i.textContent=X("search.result.other",ar(u.length))}else i.textContent=X("search.result.placeholder")}),n.pipe(T(()=>a.innerHTML=""),_(({items:u})=>A(k(...u.slice(0,10)),k(...u.slice(10)).pipe(Ee(4),jr(o),_(([f])=>k(...f)))))).subscribe(u=>a.appendChild(Uo(u))),t.pipe(M(lt),m(({data:u})=>u)).pipe(T(u=>n.next(u)),C(()=>n.complete()),m(u=>F({ref:e},u)))}function us(e,{query$:t}){return t.pipe(m(({value:r})=>{let n=Me();return n.hash="",n.searchParams.delete("h"),n.searchParams.set("q",r),{url:n}}))}function vi(e,t){let r=new S;return r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),w(e,"click").subscribe(n=>n.preventDefault()),us(e,t).pipe(T(n=>r.next(n)),C(()=>r.complete()),m(n=>F({ref:e},n)))}function gi(e,{rx$:t},{keyboard$:r}){let n=new S,o=Ae("search-query"),i=A(w(o,"keydown"),w(o,"focus")).pipe(qe(ve),m(()=>o.value),Q());return n.pipe(at(i),m(([{suggestions:c},s])=>{let u=s.split(/([\s-]+)/);if((c==null?void 0:c.length)&&u[u.length-1]){let f=c[c.length-1];f.startsWith(u[u.length-1])&&(u[u.length-1]=f)}else u.length=0;return u})).subscribe(c=>e.innerHTML=c.join("").replace(/\s/g," 
")),r.pipe(M(({mode:c})=>c==="search")).subscribe(c=>{switch(c.type){case"ArrowRight":e.innerText.length&&o.selectionStart===o.value.length&&(o.value=e.innerText);break}}),t.pipe(M(lt),m(({data:c})=>c)).pipe(T(c=>n.next(c)),C(()=>n.complete()),m(()=>({ref:e})))}function yi(e,{index$:t,keyboard$:r}){let n=be();try{let o=(__search==null?void 0:__search.worker)||n.search,i=mi(o,t),a=Ae("search-query",e),c=Ae("search-result",e),{tx$:s,rx$:u}=i;s.pipe(M(li),st(u.pipe(M(pt))),le(1)).subscribe(s.next.bind(s)),r.pipe(M(({mode:p})=>p==="search")).subscribe(p=>{let d=Ue();switch(p.type){case"Enter":if(d===a){let h=new Map;for(let b of G(":first-child [href]",c)){let I=b.firstElementChild;h.set(b,parseFloat(I.getAttribute("data-md-score")))}if(h.size){let[[b]]=[...h].sort(([,I],[,Y])=>Y-I);b.click()}p.claim()}break;case"Escape":case"Tab":De("search",!1),a.blur();break;case"ArrowUp":case"ArrowDown":if(typeof d=="undefined")a.focus();else{let h=[a,...G(":not(details) > [href], summary, details[open] [href]",c)],b=Math.max(0,(Math.max(0,h.indexOf(d))+h.length+(p.type==="ArrowUp"?-1:1))%h.length);h[b].focus()}p.claim();break;default:a!==Ue()&&a.focus()}}),r.pipe(M(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":a.focus(),a.select(),p.claim();break}});let f=hi(a,i),l=bi(c,i,{query$:f});return A(f,l).pipe(Be(...ne("search-share",e).map(p=>vi(p,{query$:f})),...ne("search-suggest",e).map(p=>gi(p,i,{keyboard$:r}))))}catch(o){return e.hidden=!0,ye}}function xi(e,{index$:t,location$:r}){return q([t,r.pipe(V(Me()),M(n=>!!n.searchParams.get("h")))]).pipe(m(([n,o])=>Kr(n.config,!0)(o.searchParams.get("h"))),m(n=>{var a;let o=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let c=i.nextNode();c;c=i.nextNode())if((a=c.parentElement)==null?void 0:a.offsetHeight){let s=c.textContent,u=n(s);u.length>s.length&&o.set(c,u)}for(let[c,s]of o){let{childNodes:u}=R("span",null,s);c.replaceWith(...Array.from(u))}return{ref:e,nodes:o}}))}function 
fs(e,{viewport$:t,main$:r}){let n=e.parentElement,o=n.offsetTop-n.parentElement.offsetTop;return q([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:c}}])=>(a=a+Math.min(o,Math.max(0,c-i))-o,{height:a,locked:c>=i+o})),Q((i,a)=>i.height===a.height&&i.locked===a.locked))}function Yr(e,n){var o=n,{header$:t}=o,r=Zr(o,["header$"]);let i=K(".md-sidebar__scrollwrap",e),{y:a}=We(i);return U(()=>{let c=new S;return c.pipe(He(0,Se),Oe(t)).subscribe({next([{height:s},{height:u}]){i.style.height=`${s-2*a}px`,e.style.top=`${u}px`},complete(){i.style.height="",e.style.top=""}}),fs(e,r).pipe(T(s=>c.next(s)),C(()=>c.complete()),m(s=>F({ref:e},s)))})}function wi(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return xt(Le(`${r}/releases/latest`).pipe(m(n=>({version:n.tag_name})),Ye({})),Le(r).pipe(m(n=>({stars:n.stargazers_count,forks:n.forks_count})),Ye({}))).pipe(m(([n,o])=>F(F({},n),o)))}else{let r=`https://api.github.com/users/${e}`;return Le(r).pipe(m(n=>({repositories:n.public_repos})),Ye({}))}}function Si(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return Le(r).pipe(m(({star_count:n,forks_count:o})=>({stars:n,forks:o})),Ye({}))}function Ei(e){let[t]=e.match(/(git(?:hub|lab))/i)||[];switch(t.toLowerCase()){case"github":let[,r,n]=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);return wi(r,n);case"gitlab":let[,o,i]=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i);return Si(o,i);default:return N}}var ps;function ls(e){return ps||(ps=U(()=>{let t=__md_get("__source",sessionStorage);return t?k(t):Ei(e.href).pipe(T(r=>__md_set("__source",r,sessionStorage)))}).pipe(je(()=>N),M(t=>Object.keys(t).length>0),m(t=>({facts:t})),J(1)))}function Oi(e){let t=K(":scope > :last-child",e);return U(()=>{let r=new S;return r.subscribe(({facts:n})=>{t.appendChild(Wo(n)),t.setAttribute("data-md-state","done")}),ls(e).pipe(T(n=>r.next(n)),C(()=>r.complete()),m(n=>F({ref:e},n)))})}function ms(e,{viewport$:t,header$:r}){return 
he(document.body).pipe(_(()=>cr(e,{header$:r,viewport$:t})),m(({offset:{y:n}})=>({hidden:n>=10})),B("hidden"))}function Ti(e,t){return U(()=>{let r=new S;return r.subscribe({next({hidden:n}){n?e.setAttribute("data-md-state","hidden"):e.removeAttribute("data-md-state")},complete(){e.removeAttribute("data-md-state")}}),(ae("navigation.tabs.sticky")?k({hidden:!1}):ms(e,t)).pipe(T(n=>r.next(n)),C(()=>r.complete()),m(n=>F({ref:e},n)))})}function ds(e,{viewport$:t,header$:r}){let n=new Map,o=G("[href^=\\#]",e);for(let c of o){let s=decodeURIComponent(c.hash.substring(1)),u=ce(`[id="${s}"]`);typeof u!="undefined"&&n.set(c,u)}let i=r.pipe(m(c=>24+c.height));return he(document.body).pipe(B("height"),_(c=>U(()=>{let s=[];return k([...n].reduce((u,[f,l])=>{for(;s.length&&n.get(s[s.length-1]).tagName>=l.tagName;)s.pop();let p=l.offsetTop;for(;!p&&l.parentElement;)l=l.parentElement,p=l.offsetTop;return u.set([...s=[...s,f]].reverse(),p)},new Map))}).pipe(m(s=>new Map([...s].sort(([,u],[,f])=>u-f))),_(s=>q([t,i]).pipe(Rr(([u,f],[{offset:{y:l},size:p},d])=>{let h=l+p.height>=Math.floor(c.height);for(;f.length;){let[,b]=f[0];if(b-d=l&&!h)f=[u.pop(),...f];else break}return[u,f]},[[],[...s]]),Q((u,f)=>u[0]===f[0]&&u[1]===f[1])))))).pipe(m(([c,s])=>({prev:c.map(([u])=>u),next:s.map(([u])=>u)})),V({prev:[],next:[]}),Ee(2,1),m(([c,s])=>c.prev.length{let n=new S;return n.subscribe(({prev:o,next:i})=>{for(let[a]of i)a.removeAttribute("data-md-state"),a.classList.remove("md-nav__link--active");for(let[a,[c]]of o.entries())c.setAttribute("data-md-state","blur"),c.classList.toggle("md-nav__link--active",a===o.length-1)}),ae("navigation.tracking")&&t.pipe(se(n.pipe(ue(1))),B("offset"),Ke(250),Oe(n)).subscribe(([,{prev:o}])=>{let i=Me(),a=o[o.length-1];if(a&&a.length){let[c]=a,{hash:s}=new URL(c.href);i.hash!==s&&(i.hash=s,history.replaceState({},"",`${i}`))}else 
i.hash="",history.replaceState({},"",`${i}`)}),ds(e,{viewport$:t,header$:r}).pipe(T(o=>n.next(o)),C(()=>n.complete()),m(o=>F({ref:e},o)))})}function hs(e,{viewport$:t,main$:r,target$:n}){let o=t.pipe(m(({offset:{y:a}})=>a),Ee(2,1),m(([a,c])=>a>c&&c>0),Q()),i=r.pipe(m(({active:a})=>a));return q([i,o]).pipe(m(([a,c])=>!(a&&c)),Q(),se(n.pipe(ct(1))),Jt(!0),kr({delay:250}),m(a=>({hidden:a})))}function Mi(e,{viewport$:t,header$:r,main$:n,target$:o}){let i=new S;return i.subscribe({next({hidden:a}){a?(e.setAttribute("data-md-state","hidden"),e.setAttribute("tabindex","-1"),e.blur()):(e.removeAttribute("data-md-state"),e.removeAttribute("tabindex"))},complete(){e.style.top="",e.setAttribute("data-md-state","hidden"),e.removeAttribute("tabindex")}}),r.pipe(se(i.pipe(Jt(0),ue(1))),B("height")).subscribe(({height:a})=>{e.style.top=`${a+16}px`}),hs(e,{viewport$:t,main$:n,target$:o}).pipe(T(a=>i.next(a)),C(()=>i.complete()),m(a=>F({ref:e},a)))}function Li({document$:e,tablet$:t}){e.pipe(_(()=>k(...G("[data-md-state=indeterminate]"))),T(r=>{r.indeterminate=!0,r.checked=!1}),ie(r=>w(r,"change").pipe(Ir(()=>r.hasAttribute("data-md-state")),re(r))),Oe(t)).subscribe(([r,n])=>{r.removeAttribute("data-md-state"),n&&(r.checked=!1)})}function bs(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Ai({document$:e}){e.pipe(_(()=>k(...G("[data-md-scrollfix]"))),T(t=>t.removeAttribute("data-md-scrollfix")),M(bs),ie(t=>w(t,"touchstart").pipe(re(t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function Ci({viewport$:e,tablet$:t}){q([ir("search"),t]).pipe(m(([r,n])=>r&&!n),_(r=>k(r).pipe(Pe(r?400:100))),Oe(e)).subscribe(([r,{offset:{y:n}}])=>{if(r)document.body.setAttribute("data-md-state","lock"),document.body.style.top=`-${n}px`;else{let 
o=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-state"),document.body.style.top="",o&&window.scrollTo(0,o)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let n=e[r];typeof n!="object"?n=document.createTextNode(n):n.parentNode&&n.parentNode.removeChild(n),r?t.insertBefore(this.previousSibling,n):t.replaceChild(n,this)}}}));document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var mt=so(),ur=Eo(),fr=Lo(),Br=wo(),xe=Io(),pr=Wr("(min-width: 960px)"),ki=Wr("(min-width: 1220px)"),Ri=Ao(),Hi=be(),Pi=document.forms.namedItem("search")?(__search==null?void 0:__search.index)||Le(new URL("search/search_index.json",Hi.base)):ye,Gr=new S;si({alert$:Gr});ae("navigation.instant")&&ci({document$:mt,location$:ur,viewport$:xe});var $i;(($i=Hi.version)==null?void 0:$i.provider)==="mike"&&di();A(ur,fr).pipe(Pe(125)).subscribe(()=>{De("drawer",!1),De("search",!1)});Br.pipe(M(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=ce("[href][rel=prev]");typeof t!="undefined"&&t.click();break;case"n":case".":let r=ce("[href][rel=next]");typeof r!="undefined"&&r.click();break}});Li({document$:mt,tablet$:pr});Ai({document$:mt});Ci({viewport$:xe,tablet$:pr});var 
Ve=ri(Ae("header"),{viewport$:xe}),lr=mt.pipe(m(()=>Ae("main")),_(e=>ii(e,{viewport$:xe,header$:Ve})),J(1)),vs=A(...ne("dialog").map(e=>ti(e,{alert$:Gr})),...ne("header").map(e=>ni(e,{viewport$:xe,header$:Ve,main$:lr})),...ne("palette").map(e=>ai(e)),...ne("search").map(e=>yi(e,{index$:Pi,keyboard$:Br})),...ne("source").map(e=>Oi(e))),gs=U(()=>A(...ne("content").map(e=>ei(e,{target$:fr,print$:Ri})),...ne("content").map(e=>ae("search.highlight")?xi(e,{index$:Pi,location$:ur}):N),...ne("header-title").map(e=>oi(e,{viewport$:xe,header$:Ve})),...ne("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Dr(ki,()=>Yr(e,{viewport$:xe,header$:Ve,main$:lr})):Dr(pr,()=>Yr(e,{viewport$:xe,header$:Ve,main$:lr}))),...ne("tabs").map(e=>Ti(e,{viewport$:xe,header$:Ve})),...ne("toc").map(e=>_i(e,{viewport$:xe,header$:Ve})),...ne("top").map(e=>Mi(e,{viewport$:xe,header$:Ve,main$:lr,target$:fr})))),Ii=mt.pipe(_(()=>gs),Be(vs),J(1));Ii.subscribe();window.document$=mt;window.location$=ur;window.target$=fr;window.keyboard$=Br;window.viewport$=xe;window.tablet$=pr;window.screen$=ki;window.print$=Ri;window.alert$=Gr;window.component$=Ii;})(); -//# sourceMappingURL=bundle.960e086b.min.js.map - diff --git a/assets/javascripts/bundle.960e086b.min.js.map b/assets/javascripts/bundle.960e086b.min.js.map deleted file mode 100644 index d40b286787ad..000000000000 --- a/assets/javascripts/bundle.960e086b.min.js.map +++ /dev/null @@ -1,8 +0,0 @@ -{ - "version": 3, - "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/url-polyfill/url-polyfill.js", "node_modules/rxjs/node_modules/tslib/tslib.js", "node_modules/clipboard/dist/clipboard.js", "node_modules/escape-html/index.js", "node_modules/array-flat-polyfill/index.mjs", "src/assets/javascripts/bundle.ts", "node_modules/unfetch/polyfill/index.js", "node_modules/rxjs/node_modules/tslib/modules/index.js", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", 
"node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", "node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", 
"node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", "node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", 
"node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/concatMap.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/sample.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", 
"node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/switchMapTo.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/assets/javascripts/browser/document/index.ts", "src/assets/javascripts/browser/element/_/index.ts", "src/assets/javascripts/browser/element/focus/index.ts", "src/assets/javascripts/browser/element/offset/_/index.ts", "src/assets/javascripts/browser/element/offset/content/index.ts", "node_modules/resize-observer-polyfill/dist/ResizeObserver.es.js", "src/assets/javascripts/browser/element/size/_/index.ts", "src/assets/javascripts/browser/element/size/content/index.ts", "src/assets/javascripts/browser/element/visibility/index.ts", "src/assets/javascripts/browser/toggle/index.ts", "src/assets/javascripts/browser/keyboard/index.ts", "src/assets/javascripts/browser/location/_/index.ts", "src/assets/javascripts/utilities/h/index.ts", "src/assets/javascripts/utilities/string/index.ts", "src/assets/javascripts/browser/location/hash/index.ts", "src/assets/javascripts/browser/media/index.ts", "src/assets/javascripts/browser/request/index.ts", "src/assets/javascripts/browser/viewport/offset/index.ts", "src/assets/javascripts/browser/viewport/size/index.ts", "src/assets/javascripts/browser/viewport/_/index.ts", "src/assets/javascripts/browser/viewport/at/index.ts", "src/assets/javascripts/browser/worker/index.ts", "src/assets/javascripts/_/index.ts", "src/assets/javascripts/components/_/index.ts", "src/assets/javascripts/components/content/code/index.ts", "src/assets/javascripts/templates/annotation/index.tsx", 
"src/assets/javascripts/templates/clipboard/index.tsx", "src/assets/javascripts/templates/search/index.tsx", "src/assets/javascripts/templates/source/index.tsx", "src/assets/javascripts/templates/table/index.tsx", "src/assets/javascripts/templates/version/index.tsx", "src/assets/javascripts/components/content/annotation/_/index.ts", "src/assets/javascripts/components/content/annotation/list/index.ts", "src/assets/javascripts/components/content/details/index.ts", "src/assets/javascripts/components/content/table/index.ts", "src/assets/javascripts/components/content/tabs/index.ts", "src/assets/javascripts/components/content/_/index.ts", "src/assets/javascripts/components/dialog/index.ts", "src/assets/javascripts/components/header/_/index.ts", "src/assets/javascripts/components/header/title/index.ts", "src/assets/javascripts/components/main/index.ts", "src/assets/javascripts/components/palette/index.ts", "src/assets/javascripts/integrations/clipboard/index.ts", "src/assets/javascripts/integrations/instant/index.ts", "src/assets/javascripts/integrations/search/document/index.ts", "src/assets/javascripts/integrations/search/highlighter/index.ts", "src/assets/javascripts/integrations/search/query/transform/index.ts", "src/assets/javascripts/integrations/search/worker/message/index.ts", "src/assets/javascripts/integrations/search/worker/_/index.ts", "src/assets/javascripts/integrations/version/index.ts", "src/assets/javascripts/components/search/query/index.ts", "src/assets/javascripts/components/search/result/index.ts", "src/assets/javascripts/components/search/share/index.ts", "src/assets/javascripts/components/search/suggest/index.ts", "src/assets/javascripts/components/search/_/index.ts", "src/assets/javascripts/components/search/highlight/index.ts", "src/assets/javascripts/components/sidebar/index.ts", "src/assets/javascripts/components/source/facts/github/index.ts", "src/assets/javascripts/components/source/facts/gitlab/index.ts", 
"src/assets/javascripts/components/source/facts/_/index.ts", "src/assets/javascripts/components/source/_/index.ts", "src/assets/javascripts/components/tabs/index.ts", "src/assets/javascripts/components/toc/index.ts", "src/assets/javascripts/components/top/index.ts", "src/assets/javascripts/patches/indeterminate/index.ts", "src/assets/javascripts/patches/scrollfix/index.ts", "src/assets/javascripts/patches/scrolllock/index.ts", "src/assets/javascripts/polyfills/index.ts"], - "sourceRoot": "../../../..", - "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. 
whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. 
Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. 
a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n 
document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. 
For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. 
So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "(function(global) {\r\n /**\r\n * Polyfill URLSearchParams\r\n *\r\n * Inspired from : https://github.com/WebReflection/url-search-params/blob/master/src/url-search-params.js\r\n */\r\n\r\n var checkIfIteratorIsSupported = function() {\r\n try {\r\n return !!Symbol.iterator;\r\n } catch (error) {\r\n return false;\r\n }\r\n };\r\n\r\n\r\n var iteratorSupported = checkIfIteratorIsSupported();\r\n\r\n var createIterator = function(items) {\r\n var 
iterator = {\r\n next: function() {\r\n var value = items.shift();\r\n return { done: value === void 0, value: value };\r\n }\r\n };\r\n\r\n if (iteratorSupported) {\r\n iterator[Symbol.iterator] = function() {\r\n return iterator;\r\n };\r\n }\r\n\r\n return iterator;\r\n };\r\n\r\n /**\r\n * Search param name and values should be encoded according to https://url.spec.whatwg.org/#urlencoded-serializing\r\n * encodeURIComponent() produces the same result except encoding spaces as `%20` instead of `+`.\r\n */\r\n var serializeParam = function(value) {\r\n return encodeURIComponent(value).replace(/%20/g, '+');\r\n };\r\n\r\n var deserializeParam = function(value) {\r\n return decodeURIComponent(String(value).replace(/\\+/g, ' '));\r\n };\r\n\r\n var polyfillURLSearchParams = function() {\r\n\r\n var URLSearchParams = function(searchString) {\r\n Object.defineProperty(this, '_entries', { writable: true, value: {} });\r\n var typeofSearchString = typeof searchString;\r\n\r\n if (typeofSearchString === 'undefined') {\r\n // do nothing\r\n } else if (typeofSearchString === 'string') {\r\n if (searchString !== '') {\r\n this._fromString(searchString);\r\n }\r\n } else if (searchString instanceof URLSearchParams) {\r\n var _this = this;\r\n searchString.forEach(function(value, name) {\r\n _this.append(name, value);\r\n });\r\n } else if ((searchString !== null) && (typeofSearchString === 'object')) {\r\n if (Object.prototype.toString.call(searchString) === '[object Array]') {\r\n for (var i = 0; i < searchString.length; i++) {\r\n var entry = searchString[i];\r\n if ((Object.prototype.toString.call(entry) === '[object Array]') || (entry.length !== 2)) {\r\n this.append(entry[0], entry[1]);\r\n } else {\r\n throw new TypeError('Expected [string, any] as entry at index ' + i + ' of URLSearchParams\\'s input');\r\n }\r\n }\r\n } else {\r\n for (var key in searchString) {\r\n if (searchString.hasOwnProperty(key)) {\r\n this.append(key, searchString[key]);\r\n }\r\n }\r\n }\r\n 
} else {\r\n throw new TypeError('Unsupported input\\'s type for URLSearchParams');\r\n }\r\n };\r\n\r\n var proto = URLSearchParams.prototype;\r\n\r\n proto.append = function(name, value) {\r\n if (name in this._entries) {\r\n this._entries[name].push(String(value));\r\n } else {\r\n this._entries[name] = [String(value)];\r\n }\r\n };\r\n\r\n proto.delete = function(name) {\r\n delete this._entries[name];\r\n };\r\n\r\n proto.get = function(name) {\r\n return (name in this._entries) ? this._entries[name][0] : null;\r\n };\r\n\r\n proto.getAll = function(name) {\r\n return (name in this._entries) ? this._entries[name].slice(0) : [];\r\n };\r\n\r\n proto.has = function(name) {\r\n return (name in this._entries);\r\n };\r\n\r\n proto.set = function(name, value) {\r\n this._entries[name] = [String(value)];\r\n };\r\n\r\n proto.forEach = function(callback, thisArg) {\r\n var entries;\r\n for (var name in this._entries) {\r\n if (this._entries.hasOwnProperty(name)) {\r\n entries = this._entries[name];\r\n for (var i = 0; i < entries.length; i++) {\r\n callback.call(thisArg, entries[i], name, this);\r\n }\r\n }\r\n }\r\n };\r\n\r\n proto.keys = function() {\r\n var items = [];\r\n this.forEach(function(value, name) {\r\n items.push(name);\r\n });\r\n return createIterator(items);\r\n };\r\n\r\n proto.values = function() {\r\n var items = [];\r\n this.forEach(function(value) {\r\n items.push(value);\r\n });\r\n return createIterator(items);\r\n };\r\n\r\n proto.entries = function() {\r\n var items = [];\r\n this.forEach(function(value, name) {\r\n items.push([name, value]);\r\n });\r\n return createIterator(items);\r\n };\r\n\r\n if (iteratorSupported) {\r\n proto[Symbol.iterator] = proto.entries;\r\n }\r\n\r\n proto.toString = function() {\r\n var searchArray = [];\r\n this.forEach(function(value, name) {\r\n searchArray.push(serializeParam(name) + '=' + serializeParam(value));\r\n });\r\n return searchArray.join('&');\r\n };\r\n\r\n\r\n global.URLSearchParams = 
URLSearchParams;\r\n };\r\n\r\n var checkIfURLSearchParamsSupported = function() {\r\n try {\r\n var URLSearchParams = global.URLSearchParams;\r\n\r\n return (\r\n (new URLSearchParams('?a=1').toString() === 'a=1') &&\r\n (typeof URLSearchParams.prototype.set === 'function') &&\r\n (typeof URLSearchParams.prototype.entries === 'function')\r\n );\r\n } catch (e) {\r\n return false;\r\n }\r\n };\r\n\r\n if (!checkIfURLSearchParamsSupported()) {\r\n polyfillURLSearchParams();\r\n }\r\n\r\n var proto = global.URLSearchParams.prototype;\r\n\r\n if (typeof proto.sort !== 'function') {\r\n proto.sort = function() {\r\n var _this = this;\r\n var items = [];\r\n this.forEach(function(value, name) {\r\n items.push([name, value]);\r\n if (!_this._entries) {\r\n _this.delete(name);\r\n }\r\n });\r\n items.sort(function(a, b) {\r\n if (a[0] < b[0]) {\r\n return -1;\r\n } else if (a[0] > b[0]) {\r\n return +1;\r\n } else {\r\n return 0;\r\n }\r\n });\r\n if (_this._entries) { // force reset because IE keeps keys index\r\n _this._entries = {};\r\n }\r\n for (var i = 0; i < items.length; i++) {\r\n this.append(items[i][0], items[i][1]);\r\n }\r\n };\r\n }\r\n\r\n if (typeof proto._fromString !== 'function') {\r\n Object.defineProperty(proto, '_fromString', {\r\n enumerable: false,\r\n configurable: false,\r\n writable: false,\r\n value: function(searchString) {\r\n if (this._entries) {\r\n this._entries = {};\r\n } else {\r\n var keys = [];\r\n this.forEach(function(value, name) {\r\n keys.push(name);\r\n });\r\n for (var i = 0; i < keys.length; i++) {\r\n this.delete(keys[i]);\r\n }\r\n }\r\n\r\n searchString = searchString.replace(/^\\?/, '');\r\n var attributes = searchString.split('&');\r\n var attribute;\r\n for (var i = 0; i < attributes.length; i++) {\r\n attribute = attributes[i].split('=');\r\n this.append(\r\n deserializeParam(attribute[0]),\r\n (attribute.length > 1) ? 
deserializeParam(attribute[1]) : ''\r\n );\r\n }\r\n }\r\n });\r\n }\r\n\r\n // HTMLAnchorElement\r\n\r\n})(\r\n (typeof global !== 'undefined') ? global\r\n : ((typeof window !== 'undefined') ? window\r\n : ((typeof self !== 'undefined') ? self : this))\r\n);\r\n\r\n(function(global) {\r\n /**\r\n * Polyfill URL\r\n *\r\n * Inspired from : https://github.com/arv/DOM-URL-Polyfill/blob/master/src/url.js\r\n */\r\n\r\n var checkIfURLIsSupported = function() {\r\n try {\r\n var u = new global.URL('b', 'http://a');\r\n u.pathname = 'c d';\r\n return (u.href === 'http://a/c%20d') && u.searchParams;\r\n } catch (e) {\r\n return false;\r\n }\r\n };\r\n\r\n\r\n var polyfillURL = function() {\r\n var _URL = global.URL;\r\n\r\n var URL = function(url, base) {\r\n if (typeof url !== 'string') url = String(url);\r\n if (base && typeof base !== 'string') base = String(base);\r\n\r\n // Only create another document if the base is different from current location.\r\n var doc = document, baseElement;\r\n if (base && (global.location === void 0 || base !== global.location.href)) {\r\n base = base.toLowerCase();\r\n doc = document.implementation.createHTMLDocument('');\r\n baseElement = doc.createElement('base');\r\n baseElement.href = base;\r\n doc.head.appendChild(baseElement);\r\n try {\r\n if (baseElement.href.indexOf(base) !== 0) throw new Error(baseElement.href);\r\n } catch (err) {\r\n throw new Error('URL unable to set base ' + base + ' due to ' + err);\r\n }\r\n }\r\n\r\n var anchorElement = doc.createElement('a');\r\n anchorElement.href = url;\r\n if (baseElement) {\r\n doc.body.appendChild(anchorElement);\r\n anchorElement.href = anchorElement.href; // force href to refresh\r\n }\r\n\r\n var inputElement = doc.createElement('input');\r\n inputElement.type = 'url';\r\n inputElement.value = url;\r\n\r\n if (anchorElement.protocol === ':' || !/:/.test(anchorElement.href) || (!inputElement.checkValidity() && !base)) {\r\n throw new TypeError('Invalid URL');\r\n }\r\n\r\n 
Object.defineProperty(this, '_anchorElement', {\r\n value: anchorElement\r\n });\r\n\r\n\r\n // create a linked searchParams which reflect its changes on URL\r\n var searchParams = new global.URLSearchParams(this.search);\r\n var enableSearchUpdate = true;\r\n var enableSearchParamsUpdate = true;\r\n var _this = this;\r\n ['append', 'delete', 'set'].forEach(function(methodName) {\r\n var method = searchParams[methodName];\r\n searchParams[methodName] = function() {\r\n method.apply(searchParams, arguments);\r\n if (enableSearchUpdate) {\r\n enableSearchParamsUpdate = false;\r\n _this.search = searchParams.toString();\r\n enableSearchParamsUpdate = true;\r\n }\r\n };\r\n });\r\n\r\n Object.defineProperty(this, 'searchParams', {\r\n value: searchParams,\r\n enumerable: true\r\n });\r\n\r\n var search = void 0;\r\n Object.defineProperty(this, '_updateSearchParams', {\r\n enumerable: false,\r\n configurable: false,\r\n writable: false,\r\n value: function() {\r\n if (this.search !== search) {\r\n search = this.search;\r\n if (enableSearchParamsUpdate) {\r\n enableSearchUpdate = false;\r\n this.searchParams._fromString(this.search);\r\n enableSearchUpdate = true;\r\n }\r\n }\r\n }\r\n });\r\n };\r\n\r\n var proto = URL.prototype;\r\n\r\n var linkURLWithAnchorAttribute = function(attributeName) {\r\n Object.defineProperty(proto, attributeName, {\r\n get: function() {\r\n return this._anchorElement[attributeName];\r\n },\r\n set: function(value) {\r\n this._anchorElement[attributeName] = value;\r\n },\r\n enumerable: true\r\n });\r\n };\r\n\r\n ['hash', 'host', 'hostname', 'port', 'protocol']\r\n .forEach(function(attributeName) {\r\n linkURLWithAnchorAttribute(attributeName);\r\n });\r\n\r\n Object.defineProperty(proto, 'search', {\r\n get: function() {\r\n return this._anchorElement['search'];\r\n },\r\n set: function(value) {\r\n this._anchorElement['search'] = value;\r\n this._updateSearchParams();\r\n },\r\n enumerable: true\r\n });\r\n\r\n 
Object.defineProperties(proto, {\r\n\r\n 'toString': {\r\n get: function() {\r\n var _this = this;\r\n return function() {\r\n return _this.href;\r\n };\r\n }\r\n },\r\n\r\n 'href': {\r\n get: function() {\r\n return this._anchorElement.href.replace(/\\?$/, '');\r\n },\r\n set: function(value) {\r\n this._anchorElement.href = value;\r\n this._updateSearchParams();\r\n },\r\n enumerable: true\r\n },\r\n\r\n 'pathname': {\r\n get: function() {\r\n return this._anchorElement.pathname.replace(/(^\\/?)/, '/');\r\n },\r\n set: function(value) {\r\n this._anchorElement.pathname = value;\r\n },\r\n enumerable: true\r\n },\r\n\r\n 'origin': {\r\n get: function() {\r\n // get expected port from protocol\r\n var expectedPort = { 'http:': 80, 'https:': 443, 'ftp:': 21 }[this._anchorElement.protocol];\r\n // add port to origin if, expected port is different than actual port\r\n // and it is not empty f.e http://foo:8080\r\n // 8080 != 80 && 8080 != ''\r\n var addPortToOrigin = this._anchorElement.port != expectedPort &&\r\n this._anchorElement.port !== '';\r\n\r\n return this._anchorElement.protocol +\r\n '//' +\r\n this._anchorElement.hostname +\r\n (addPortToOrigin ? 
(':' + this._anchorElement.port) : '');\r\n },\r\n enumerable: true\r\n },\r\n\r\n 'password': { // TODO\r\n get: function() {\r\n return '';\r\n },\r\n set: function(value) {\r\n },\r\n enumerable: true\r\n },\r\n\r\n 'username': { // TODO\r\n get: function() {\r\n return '';\r\n },\r\n set: function(value) {\r\n },\r\n enumerable: true\r\n },\r\n });\r\n\r\n URL.createObjectURL = function(blob) {\r\n return _URL.createObjectURL.apply(_URL, arguments);\r\n };\r\n\r\n URL.revokeObjectURL = function(url) {\r\n return _URL.revokeObjectURL.apply(_URL, arguments);\r\n };\r\n\r\n global.URL = URL;\r\n\r\n };\r\n\r\n if (!checkIfURLIsSupported()) {\r\n polyfillURL();\r\n }\r\n\r\n if ((global.location !== void 0) && !('origin' in global.location)) {\r\n var getOrigin = function() {\r\n return global.location.protocol + '//' + global.location.hostname + (global.location.port ? (':' + global.location.port) : '');\r\n };\r\n\r\n try {\r\n Object.defineProperty(global.location, 'origin', {\r\n get: getOrigin,\r\n enumerable: true\r\n });\r\n } catch (e) {\r\n setInterval(function() {\r\n global.location.origin = getOrigin();\r\n }, 100);\r\n }\r\n }\r\n\r\n})(\r\n (typeof global !== 'undefined') ? global\r\n : ((typeof window !== 'undefined') ? window\r\n : ((typeof self !== 'undefined') ? self : this))\r\n);\r\n", "/*! *****************************************************************************\r\nCopyright (c) Microsoft Corporation.\r\n\r\nPermission to use, copy, modify, and/or distribute this software for any\r\npurpose with or without fee is hereby granted.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\r\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\r\nAND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\r\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\r\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\r\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\r\nPERFORMANCE OF THIS SOFTWARE.\r\n***************************************************************************** */\r\n/* global global, define, System, Reflect, Promise */\r\nvar __extends;\r\nvar __assign;\r\nvar __rest;\r\nvar __decorate;\r\nvar __param;\r\nvar __metadata;\r\nvar __awaiter;\r\nvar __generator;\r\nvar __exportStar;\r\nvar __values;\r\nvar __read;\r\nvar __spread;\r\nvar __spreadArrays;\r\nvar __spreadArray;\r\nvar __await;\r\nvar __asyncGenerator;\r\nvar __asyncDelegator;\r\nvar __asyncValues;\r\nvar __makeTemplateObject;\r\nvar __importStar;\r\nvar __importDefault;\r\nvar __classPrivateFieldGet;\r\nvar __classPrivateFieldSet;\r\nvar __createBinding;\r\n(function (factory) {\r\n var root = typeof global === \"object\" ? global : typeof self === \"object\" ? self : typeof this === \"object\" ? this : {};\r\n if (typeof define === \"function\" && define.amd) {\r\n define(\"tslib\", [\"exports\"], function (exports) { factory(createExporter(root, createExporter(exports))); });\r\n }\r\n else if (typeof module === \"object\" && typeof module.exports === \"object\") {\r\n factory(createExporter(root, createExporter(module.exports)));\r\n }\r\n else {\r\n factory(createExporter(root));\r\n }\r\n function createExporter(exports, previous) {\r\n if (exports !== root) {\r\n if (typeof Object.create === \"function\") {\r\n Object.defineProperty(exports, \"__esModule\", { value: true });\r\n }\r\n else {\r\n exports.__esModule = true;\r\n }\r\n }\r\n return function (id, v) { return exports[id] = previous ? 
previous(id, v) : v; };\r\n }\r\n})\r\n(function (exporter) {\r\n var extendStatics = Object.setPrototypeOf ||\r\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\r\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\r\n\r\n __extends = function (d, b) {\r\n if (typeof b !== \"function\" && b !== null)\r\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\r\n extendStatics(d, b);\r\n function __() { this.constructor = d; }\r\n d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());\r\n };\r\n\r\n __assign = Object.assign || function (t) {\r\n for (var s, i = 1, n = arguments.length; i < n; i++) {\r\n s = arguments[i];\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\r\n }\r\n return t;\r\n };\r\n\r\n __rest = function (s, e) {\r\n var t = {};\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\r\n t[p] = s[p];\r\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\r\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\r\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\r\n t[p[i]] = s[p[i]];\r\n }\r\n return t;\r\n };\r\n\r\n __decorate = function (decorators, target, key, desc) {\r\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\r\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\r\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? 
d(target, key, r) : d(target, key)) || r;\r\n return c > 3 && r && Object.defineProperty(target, key, r), r;\r\n };\r\n\r\n __param = function (paramIndex, decorator) {\r\n return function (target, key) { decorator(target, key, paramIndex); }\r\n };\r\n\r\n __metadata = function (metadataKey, metadataValue) {\r\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\r\n };\r\n\r\n __awaiter = function (thisArg, _arguments, P, generator) {\r\n function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }\r\n return new (P || (P = Promise))(function (resolve, reject) {\r\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\r\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\r\n function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\r\n step((generator = generator.apply(thisArg, _arguments || [])).next());\r\n });\r\n };\r\n\r\n __generator = function (thisArg, body) {\r\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;\r\n return g = { next: verb(0), \"throw\": verb(1), \"return\": verb(2) }, typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\r\n function verb(n) { return function (v) { return step([n, v]); }; }\r\n function step(op) {\r\n if (f) throw new TypeError(\"Generator is already executing.\");\r\n while (_) try {\r\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? 
y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\r\n if (y = 0, t) op = [op[0] & 2, t.value];\r\n switch (op[0]) {\r\n case 0: case 1: t = op; break;\r\n case 4: _.label++; return { value: op[1], done: false };\r\n case 5: _.label++; y = op[1]; op = [0]; continue;\r\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\r\n default:\r\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\r\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\r\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\r\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\r\n if (t[2]) _.ops.pop();\r\n _.trys.pop(); continue;\r\n }\r\n op = body.call(thisArg, _);\r\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\r\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\r\n }\r\n };\r\n\r\n __exportStar = function(m, o) {\r\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\r\n };\r\n\r\n __createBinding = Object.create ? (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });\r\n }) : (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n o[k2] = m[k];\r\n });\r\n\r\n __values = function (o) {\r\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\r\n if (m) return m.call(o);\r\n if (o && typeof o.length === \"number\") return {\r\n next: function () {\r\n if (o && i >= o.length) o = void 0;\r\n return { value: o && o[i++], done: !o };\r\n }\r\n };\r\n throw new TypeError(s ? 
\"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\r\n };\r\n\r\n __read = function (o, n) {\r\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\r\n if (!m) return o;\r\n var i = m.call(o), r, ar = [], e;\r\n try {\r\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\r\n }\r\n catch (error) { e = { error: error }; }\r\n finally {\r\n try {\r\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\r\n }\r\n finally { if (e) throw e.error; }\r\n }\r\n return ar;\r\n };\r\n\r\n /** @deprecated */\r\n __spread = function () {\r\n for (var ar = [], i = 0; i < arguments.length; i++)\r\n ar = ar.concat(__read(arguments[i]));\r\n return ar;\r\n };\r\n\r\n /** @deprecated */\r\n __spreadArrays = function () {\r\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\r\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\r\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\r\n r[k] = a[j];\r\n return r;\r\n };\r\n\r\n __spreadArray = function (to, from, pack) {\r\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\r\n if (ar || !(i in from)) {\r\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\r\n ar[i] = from[i];\r\n }\r\n }\r\n return to.concat(ar || Array.prototype.slice.call(from));\r\n };\r\n\r\n __await = function (v) {\r\n return this instanceof __await ? 
(this.v = v, this) : new __await(v);\r\n };\r\n\r\n __asyncGenerator = function (thisArg, _arguments, generator) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\r\n return i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i;\r\n function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; }\r\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\r\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\r\n function fulfill(value) { resume(\"next\", value); }\r\n function reject(value) { resume(\"throw\", value); }\r\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\r\n };\r\n\r\n __asyncDelegator = function (o) {\r\n var i, p;\r\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\r\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: n === \"return\" } : f ? f(v) : v; } : f; }\r\n };\r\n\r\n __asyncValues = function (o) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var m = o[Symbol.asyncIterator], i;\r\n return m ? m.call(o) : (o = typeof __values === \"function\" ? 
__values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\r\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\r\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\r\n };\r\n\r\n __makeTemplateObject = function (cooked, raw) {\r\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\r\n return cooked;\r\n };\r\n\r\n var __setModuleDefault = Object.create ? (function(o, v) {\r\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\r\n }) : function(o, v) {\r\n o[\"default\"] = v;\r\n };\r\n\r\n __importStar = function (mod) {\r\n if (mod && mod.__esModule) return mod;\r\n var result = {};\r\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\r\n __setModuleDefault(result, mod);\r\n return result;\r\n };\r\n\r\n __importDefault = function (mod) {\r\n return (mod && mod.__esModule) ? mod : { \"default\": mod };\r\n };\r\n\r\n __classPrivateFieldGet = function (receiver, state, kind, f) {\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\r\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? 
f.value : state.get(receiver);\r\n };\r\n\r\n __classPrivateFieldSet = function (receiver, state, value, kind, f) {\r\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\r\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\r\n };\r\n\r\n exporter(\"__extends\", __extends);\r\n exporter(\"__assign\", __assign);\r\n exporter(\"__rest\", __rest);\r\n exporter(\"__decorate\", __decorate);\r\n exporter(\"__param\", __param);\r\n exporter(\"__metadata\", __metadata);\r\n exporter(\"__awaiter\", __awaiter);\r\n exporter(\"__generator\", __generator);\r\n exporter(\"__exportStar\", __exportStar);\r\n exporter(\"__createBinding\", __createBinding);\r\n exporter(\"__values\", __values);\r\n exporter(\"__read\", __read);\r\n exporter(\"__spread\", __spread);\r\n exporter(\"__spreadArrays\", __spreadArrays);\r\n exporter(\"__spreadArray\", __spreadArray);\r\n exporter(\"__await\", __await);\r\n exporter(\"__asyncGenerator\", __asyncGenerator);\r\n exporter(\"__asyncDelegator\", __asyncDelegator);\r\n exporter(\"__asyncValues\", __asyncValues);\r\n exporter(\"__makeTemplateObject\", __makeTemplateObject);\r\n exporter(\"__importStar\", __importStar);\r\n exporter(\"__importDefault\", __importDefault);\r\n exporter(\"__classPrivateFieldGet\", __classPrivateFieldGet);\r\n exporter(\"__classPrivateFieldSet\", __classPrivateFieldSet);\r\n});\r\n", "/*!\n * clipboard.js v2.0.8\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse 
if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 134:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/clipboard-action.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? 
\"symbol\" : typeof obj; }; } return _typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\n\n/**\n * Inner class which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n */\n\nvar ClipboardAction = /*#__PURE__*/function () {\n /**\n * @param {Object} options\n */\n function ClipboardAction(options) {\n _classCallCheck(this, ClipboardAction);\n\n this.resolveOptions(options);\n this.initSelection();\n }\n /**\n * Defines base properties passed from constructor.\n * @param {Object} options\n */\n\n\n _createClass(ClipboardAction, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : {};\n this.action = options.action;\n this.container = options.container;\n this.emitter = options.emitter;\n this.target = options.target;\n this.text = options.text;\n this.trigger = options.trigger;\n this.selectedText = '';\n }\n /**\n * Decides which selection strategy is going to be applied based\n * on the existence of `text` and `target` properties.\n */\n\n }, {\n key: \"initSelection\",\n value: function initSelection() {\n if (this.text) {\n this.selectFake();\n } else if (this.target) {\n this.selectTarget();\n }\n }\n /**\n * Creates a fake textarea element, sets its value from `text` property,\n */\n\n }, {\n key: \"createFakeElement\",\n value: function createFakeElement() {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n this.fakeElem = document.createElement('textarea'); // Prevent zooming on iOS\n\n this.fakeElem.style.fontSize = '12pt'; // Reset box model\n\n this.fakeElem.style.border = '0';\n this.fakeElem.style.padding = '0';\n this.fakeElem.style.margin = '0'; // Move element out of screen horizontally\n\n this.fakeElem.style.position = 'absolute';\n this.fakeElem.style[isRTL ? 
'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n this.fakeElem.style.top = \"\".concat(yPosition, \"px\");\n this.fakeElem.setAttribute('readonly', '');\n this.fakeElem.value = this.text;\n return this.fakeElem;\n }\n /**\n * Get's the value of fakeElem,\n * and makes a selection on it.\n */\n\n }, {\n key: \"selectFake\",\n value: function selectFake() {\n var _this = this;\n\n var fakeElem = this.createFakeElement();\n\n this.fakeHandlerCallback = function () {\n return _this.removeFake();\n };\n\n this.fakeHandler = this.container.addEventListener('click', this.fakeHandlerCallback) || true;\n this.container.appendChild(fakeElem);\n this.selectedText = select_default()(fakeElem);\n this.copyText();\n this.removeFake();\n }\n /**\n * Only removes the fake element after another click event, that way\n * a user can hit `Ctrl+C` to copy because selection still exists.\n */\n\n }, {\n key: \"removeFake\",\n value: function removeFake() {\n if (this.fakeHandler) {\n this.container.removeEventListener('click', this.fakeHandlerCallback);\n this.fakeHandler = null;\n this.fakeHandlerCallback = null;\n }\n\n if (this.fakeElem) {\n this.container.removeChild(this.fakeElem);\n this.fakeElem = null;\n }\n }\n /**\n * Selects the content from element passed on `target` property.\n */\n\n }, {\n key: \"selectTarget\",\n value: function selectTarget() {\n this.selectedText = select_default()(this.target);\n this.copyText();\n }\n /**\n * Executes the copy operation based on the current selection.\n */\n\n }, {\n key: \"copyText\",\n value: function copyText() {\n var succeeded;\n\n try {\n succeeded = document.execCommand(this.action);\n } catch (err) {\n succeeded = false;\n }\n\n this.handleResult(succeeded);\n }\n /**\n * Fires an event based on the copy operation result.\n * @param {Boolean} succeeded\n */\n\n }, {\n key: \"handleResult\",\n value: function 
handleResult(succeeded) {\n this.emitter.emit(succeeded ? 'success' : 'error', {\n action: this.action,\n text: this.selectedText,\n trigger: this.trigger,\n clearSelection: this.clearSelection.bind(this)\n });\n }\n /**\n * Moves focus away from `target` and back to the trigger, removes current selection.\n */\n\n }, {\n key: \"clearSelection\",\n value: function clearSelection() {\n if (this.trigger) {\n this.trigger.focus();\n }\n\n document.activeElement.blur();\n window.getSelection().removeAllRanges();\n }\n /**\n * Sets the `action` to be performed which can be either 'copy' or 'cut'.\n * @param {String} action\n */\n\n }, {\n key: \"destroy\",\n\n /**\n * Destroy lifecycle.\n */\n value: function destroy() {\n this.removeFake();\n }\n }, {\n key: \"action\",\n set: function set() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : 'copy';\n this._action = action;\n\n if (this._action !== 'copy' && this._action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n }\n }\n /**\n * Gets the `action` property.\n * @return {String}\n */\n ,\n get: function get() {\n return this._action;\n }\n /**\n * Sets the `target` property using an element\n * that will be have its content copied.\n * @param {Element} target\n */\n\n }, {\n key: \"target\",\n set: function set(target) {\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (this.action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (this.action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. 
You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n\n this._target = target;\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n }\n }\n /**\n * Gets the `target` property.\n * @return {String|HTMLElement}\n */\n ,\n get: function get() {\n return this._target;\n }\n }]);\n\n return ClipboardAction;\n}();\n\n/* harmony default export */ var clipboard_action = (ClipboardAction);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction clipboard_classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction clipboard_defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction clipboard_createClass(Constructor, protoProps, staticProps) { if (protoProps) clipboard_defineProperties(Constructor.prototype, protoProps); if (staticProps) clipboard_defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } 
}); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? 
Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n clipboard_classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n clipboard_createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? 
options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n\n if (this.clipboardAction) {\n this.clipboardAction = null;\n }\n\n this.clipboardAction = new clipboard_action({\n action: this.action(trigger),\n target: this.target(trigger),\n text: this.text(trigger),\n container: this.container,\n trigger: trigger,\n emitter: this\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n\n if (this.clipboardAction) {\n this.clipboardAction.destroy();\n this.clipboardAction = null;\n }\n }\n }], [{\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length 
> 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? [action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} 
[elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object 
NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n 
destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName === 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n 
// Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) 
{\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(134);\n/******/ })()\n.default;\n});", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = 
index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? html + str.substring(lastIndex, index)\n : html;\n}\n", "Array.prototype.flat||Object.defineProperty(Array.prototype,\"flat\",{configurable:!0,value:function r(){var t=isNaN(arguments[0])?1:Number(arguments[0]);return t?Array.prototype.reduce.call(this,function(a,e){return Array.isArray(e)?a.push.apply(a,r.call(e,t-1)):a.push(e),a},[]):Array.prototype.slice.call(this)},writable:!0}),Array.prototype.flatMap||Object.defineProperty(Array.prototype,\"flatMap\",{configurable:!0,value:function(r){return Array.prototype.map.apply(this,arguments).flat()},writable:!0})\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"array-flat-polyfill\"\nimport \"focus-visible\"\nimport \"unfetch/polyfill\"\nimport \"url-polyfill\"\n\nimport {\n EMPTY,\n NEVER,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getOptionalElement,\n requestJSON,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountBackToTop,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountSearch,\n mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantLoading,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Application\n * ------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget()\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = 
watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? __search?.index || requestJSON(\n new URL(\"search/search_index.json\", config.base)\n )\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up instant loading, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantLoading({ document$, location$, viewport$ })\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector()\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"[href][rel=prev]\")\n if (typeof prev !== \"undefined\")\n prev.click()\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"[href][rel=next]\")\n if (typeof next !== \"undefined\")\n next.click()\n break\n }\n })\n\n/* Set up patches */\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Dialog */\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Header */\n ...getComponentElements(\"header\")\n 
.map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, { viewport$, header$ })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* 
Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.component$ = component$ /* Component observable */\n", "self.fetch||(self.fetch=function(e,n){return n=n||{},new Promise(function(t,s){var r=new XMLHttpRequest,o=[],u=[],i={},a=function(){return{ok:2==(r.status/100|0),statusText:r.statusText,status:r.status,url:r.responseURL,text:function(){return Promise.resolve(r.responseText)},json:function(){return Promise.resolve(r.responseText).then(JSON.parse)},blob:function(){return Promise.resolve(new Blob([r.response]))},clone:a,headers:{keys:function(){return o},entries:function(){return u},get:function(e){return i[e.toLowerCase()]},has:function(e){return e.toLowerCase()in i}}}};for(var c in r.open(n.method||\"get\",e,!0),r.onload=function(){r.getAllResponseHeaders().replace(/^(.*?):[^\\S\\n]*([\\s\\S]*?)$/gm,function(e,n,t){o.push(n=n.toLowerCase()),u.push([n,t]),i[n]=i[n]?i[n]+\",\"+t:t}),t(a())},r.onerror=s,r.withCredentials=\"include\"==n.credentials,n.headers)r.setRequestHeader(c,n.headers[c]);r.send(n.body||null)})});\n", "import tslib from '../tslib.js';\r\nconst {\r\n __extends,\r\n __assign,\r\n __rest,\r\n __decorate,\r\n __param,\r\n __metadata,\r\n __awaiter,\r\n __generator,\r\n __exportStar,\r\n __createBinding,\r\n __values,\r\n __read,\r\n __spread,\r\n __spreadArrays,\r\n __spreadArray,\r\n __await,\r\n __asyncGenerator,\r\n __asyncDelegator,\r\n __asyncValues,\r\n __makeTemplateObject,\r\n __importStar,\r\n __importDefault,\r\n __classPrivateFieldGet,\r\n __classPrivateFieldSet,\r\n} = tslib;\r\nexport {\r\n __extends,\r\n __assign,\r\n __rest,\r\n __decorate,\r\n __param,\r\n __metadata,\r\n 
__awaiter,\r\n __generator,\r\n __exportStar,\r\n __createBinding,\r\n __values,\r\n __read,\r\n __spread,\r\n __spreadArrays,\r\n __spreadArray,\r\n __await,\r\n __asyncGenerator,\r\n __asyncDelegator,\r\n __asyncValues,\r\n __makeTemplateObject,\r\n __importStar,\r\n __importDefault,\r\n __classPrivateFieldGet,\r\n __classPrivateFieldSet,\r\n};\r\n", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n ReplaySubject,\n Subject,\n fromEvent\n} from \"rxjs\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch document\n *\n * Documents are implemented as subjects, so all downstream observables are\n * automatically updated when a new document is emitted.\n *\n * @returns Document subject\n */\nexport function watchDocument(): Subject {\n const document$ = new ReplaySubject(1)\n fromEvent(document, \"DOMContentLoaded\", { once: true })\n .subscribe(() => document$.next(document))\n\n /* Return document */\n return document$\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve all elements matching the query selector\n *\n * @template T - Element type\n *\n * @param selector - Query selector\n * @param node - Node of reference\n *\n * @returns Elements\n */\nexport function getElements(\n selector: T, node?: ParentNode\n): HTMLElementTagNameMap[T][]\n\nexport function getElements(\n selector: string, node?: ParentNode\n): T[]\n\nexport function getElements(\n selector: string, node: ParentNode = document\n): T[] {\n return Array.from(node.querySelectorAll(selector))\n}\n\n/**\n * Retrieve an element matching a query selector or throw a reference error\n *\n * Note that this function assumes that the element is present. 
If unsure if an\n * element is existent, use the `getOptionalElement` function instead.\n *\n * @template T - Element type\n *\n * @param selector - Query selector\n * @param node - Node of reference\n *\n * @returns Element\n */\nexport function getElement(\n selector: T, node?: ParentNode\n): HTMLElementTagNameMap[T]\n\nexport function getElement(\n selector: string, node?: ParentNode\n): T\n\nexport function getElement(\n selector: string, node: ParentNode = document\n): T {\n const el = getOptionalElement(selector, node)\n if (typeof el === \"undefined\")\n throw new ReferenceError(\n `Missing element: expected \"${selector}\" to be present`\n )\n\n /* Return element */\n return el\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Retrieve an optional element matching the query selector\n *\n * @template T - Element type\n *\n * @param selector - Query selector\n * @param node - Node of reference\n *\n * @returns Element or nothing\n */\nexport function getOptionalElement(\n selector: T, node?: ParentNode\n): HTMLElementTagNameMap[T] | undefined\n\nexport function getOptionalElement(\n selector: string, node?: ParentNode\n): T | undefined\n\nexport function getOptionalElement(\n selector: string, node: ParentNode = document\n): T | undefined {\n return node.querySelector(selector) || undefined\n}\n\n/**\n * Retrieve the currently active element\n *\n * @returns Element or nothing\n */\nexport function getActiveElement(): HTMLElement | undefined {\n return document.activeElement instanceof HTMLElement\n ? 
document.activeElement || undefined\n : undefined\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n debounceTime,\n distinctUntilChanged,\n fromEvent,\n map,\n merge,\n startWith\n} from \"rxjs\"\n\nimport { getActiveElement } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch element focus\n *\n * Previously, this function used `focus` and `blur` events to determine whether\n * an element is focused, but this doesn't work if there are focusable elements\n * within the elements itself. 
A better solutions are `focusin` and `focusout`\n * events, which bubble up the tree and allow for more fine-grained control.\n *\n * `debounceTime` is necessary, because when a focus change happens inside an\n * element, the observable would first emit `false` and then `true` again.\n *\n * @param el - Element\n *\n * @returns Element focus observable\n */\nexport function watchElementFocus(\n el: HTMLElement\n): Observable {\n return merge(\n fromEvent(document.body, \"focusin\"),\n fromEvent(document.body, \"focusout\")\n )\n .pipe(\n debounceTime(1),\n map(() => {\n const active = getActiveElement()\n return typeof active !== \"undefined\"\n ? el.contains(active)\n : false\n }),\n startWith(el === getActiveElement()),\n distinctUntilChanged()\n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n animationFrameScheduler,\n auditTime,\n fromEvent,\n map,\n merge,\n startWith\n} from \"rxjs\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Element offset\n */\nexport interface ElementOffset {\n x: number /* Horizontal offset */\n y: number /* Vertical offset */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve element offset\n *\n * @param el - Element\n *\n * @returns Element offset\n */\nexport function getElementOffset(\n el: HTMLElement\n): ElementOffset {\n return {\n x: el.offsetLeft,\n y: el.offsetTop\n }\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Watch element offset\n *\n * @param el - Element\n *\n * @returns Element offset observable\n */\nexport function watchElementOffset(\n el: HTMLElement\n): Observable {\n return merge(\n fromEvent(window, \"load\"),\n fromEvent(window, \"resize\")\n )\n .pipe(\n auditTime(0, animationFrameScheduler),\n map(() => getElementOffset(el)),\n startWith(getElementOffset(el))\n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the 
Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n animationFrameScheduler,\n auditTime,\n fromEvent,\n map,\n merge,\n startWith\n} from \"rxjs\"\n\nimport { ElementOffset } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve element content offset (= scroll offset)\n *\n * @param el - Element\n *\n * @returns Element content offset\n */\nexport function getElementContentOffset(\n el: HTMLElement\n): ElementOffset {\n return {\n x: el.scrollLeft,\n y: el.scrollTop\n }\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Watch element content offset\n *\n * @param el - Element\n *\n * @returns Element content offset observable\n */\nexport function watchElementContentOffset(\n el: HTMLElement\n): Observable {\n return merge(\n fromEvent(el, \"scroll\"),\n fromEvent(window, \"resize\")\n )\n .pipe(\n auditTime(0, animationFrameScheduler),\n map(() => getElementContentOffset(el)),\n startWith(getElementContentOffset(el))\n )\n}\n", "/**\r\n * A collection of shims that provide minimal functionality of the ES6 collections.\r\n *\r\n * These 
implementations are not meant to be used outside of the ResizeObserver\r\n * modules as they cover only a limited range of use cases.\r\n */\r\n/* eslint-disable require-jsdoc, valid-jsdoc */\r\nvar MapShim = (function () {\r\n if (typeof Map !== 'undefined') {\r\n return Map;\r\n }\r\n /**\r\n * Returns index in provided array that matches the specified key.\r\n *\r\n * @param {Array} arr\r\n * @param {*} key\r\n * @returns {number}\r\n */\r\n function getIndex(arr, key) {\r\n var result = -1;\r\n arr.some(function (entry, index) {\r\n if (entry[0] === key) {\r\n result = index;\r\n return true;\r\n }\r\n return false;\r\n });\r\n return result;\r\n }\r\n return /** @class */ (function () {\r\n function class_1() {\r\n this.__entries__ = [];\r\n }\r\n Object.defineProperty(class_1.prototype, \"size\", {\r\n /**\r\n * @returns {boolean}\r\n */\r\n get: function () {\r\n return this.__entries__.length;\r\n },\r\n enumerable: true,\r\n configurable: true\r\n });\r\n /**\r\n * @param {*} key\r\n * @returns {*}\r\n */\r\n class_1.prototype.get = function (key) {\r\n var index = getIndex(this.__entries__, key);\r\n var entry = this.__entries__[index];\r\n return entry && entry[1];\r\n };\r\n /**\r\n * @param {*} key\r\n * @param {*} value\r\n * @returns {void}\r\n */\r\n class_1.prototype.set = function (key, value) {\r\n var index = getIndex(this.__entries__, key);\r\n if (~index) {\r\n this.__entries__[index][1] = value;\r\n }\r\n else {\r\n this.__entries__.push([key, value]);\r\n }\r\n };\r\n /**\r\n * @param {*} key\r\n * @returns {void}\r\n */\r\n class_1.prototype.delete = function (key) {\r\n var entries = this.__entries__;\r\n var index = getIndex(entries, key);\r\n if (~index) {\r\n entries.splice(index, 1);\r\n }\r\n };\r\n /**\r\n * @param {*} key\r\n * @returns {void}\r\n */\r\n class_1.prototype.has = function (key) {\r\n return !!~getIndex(this.__entries__, key);\r\n };\r\n /**\r\n * @returns {void}\r\n */\r\n class_1.prototype.clear = function () {\r\n 
this.__entries__.splice(0);\r\n };\r\n /**\r\n * @param {Function} callback\r\n * @param {*} [ctx=null]\r\n * @returns {void}\r\n */\r\n class_1.prototype.forEach = function (callback, ctx) {\r\n if (ctx === void 0) { ctx = null; }\r\n for (var _i = 0, _a = this.__entries__; _i < _a.length; _i++) {\r\n var entry = _a[_i];\r\n callback.call(ctx, entry[1], entry[0]);\r\n }\r\n };\r\n return class_1;\r\n }());\r\n})();\n\n/**\r\n * Detects whether window and document objects are available in current environment.\r\n */\r\nvar isBrowser = typeof window !== 'undefined' && typeof document !== 'undefined' && window.document === document;\n\n// Returns global object of a current environment.\r\nvar global$1 = (function () {\r\n if (typeof global !== 'undefined' && global.Math === Math) {\r\n return global;\r\n }\r\n if (typeof self !== 'undefined' && self.Math === Math) {\r\n return self;\r\n }\r\n if (typeof window !== 'undefined' && window.Math === Math) {\r\n return window;\r\n }\r\n // eslint-disable-next-line no-new-func\r\n return Function('return this')();\r\n})();\n\n/**\r\n * A shim for the requestAnimationFrame which falls back to the setTimeout if\r\n * first one is not supported.\r\n *\r\n * @returns {number} Requests' identifier.\r\n */\r\nvar requestAnimationFrame$1 = (function () {\r\n if (typeof requestAnimationFrame === 'function') {\r\n // It's required to use a bounded function because IE sometimes throws\r\n // an \"Invalid calling object\" error if rAF is invoked without the global\r\n // object on the left hand side.\r\n return requestAnimationFrame.bind(global$1);\r\n }\r\n return function (callback) { return setTimeout(function () { return callback(Date.now()); }, 1000 / 60); };\r\n})();\n\n// Defines minimum timeout before adding a trailing call.\r\nvar trailingTimeout = 2;\r\n/**\r\n * Creates a wrapper function which ensures that provided callback will be\r\n * invoked only once during the specified delay period.\r\n *\r\n * @param {Function} 
callback - Function to be invoked after the delay period.\r\n * @param {number} delay - Delay after which to invoke callback.\r\n * @returns {Function}\r\n */\r\nfunction throttle (callback, delay) {\r\n var leadingCall = false, trailingCall = false, lastCallTime = 0;\r\n /**\r\n * Invokes the original callback function and schedules new invocation if\r\n * the \"proxy\" was called during current request.\r\n *\r\n * @returns {void}\r\n */\r\n function resolvePending() {\r\n if (leadingCall) {\r\n leadingCall = false;\r\n callback();\r\n }\r\n if (trailingCall) {\r\n proxy();\r\n }\r\n }\r\n /**\r\n * Callback invoked after the specified delay. It will further postpone\r\n * invocation of the original function delegating it to the\r\n * requestAnimationFrame.\r\n *\r\n * @returns {void}\r\n */\r\n function timeoutCallback() {\r\n requestAnimationFrame$1(resolvePending);\r\n }\r\n /**\r\n * Schedules invocation of the original function.\r\n *\r\n * @returns {void}\r\n */\r\n function proxy() {\r\n var timeStamp = Date.now();\r\n if (leadingCall) {\r\n // Reject immediately following calls.\r\n if (timeStamp - lastCallTime < trailingTimeout) {\r\n return;\r\n }\r\n // Schedule new call to be in invoked when the pending one is resolved.\r\n // This is important for \"transitions\" which never actually start\r\n // immediately so there is a chance that we might miss one if change\r\n // happens amids the pending invocation.\r\n trailingCall = true;\r\n }\r\n else {\r\n leadingCall = true;\r\n trailingCall = false;\r\n setTimeout(timeoutCallback, delay);\r\n }\r\n lastCallTime = timeStamp;\r\n }\r\n return proxy;\r\n}\n\n// Minimum delay before invoking the update of observers.\r\nvar REFRESH_DELAY = 20;\r\n// A list of substrings of CSS properties used to find transition events that\r\n// might affect dimensions of observed elements.\r\nvar transitionKeys = ['top', 'right', 'bottom', 'left', 'width', 'height', 'size', 'weight'];\r\n// Check if MutationObserver is 
available.\r\nvar mutationObserverSupported = typeof MutationObserver !== 'undefined';\r\n/**\r\n * Singleton controller class which handles updates of ResizeObserver instances.\r\n */\r\nvar ResizeObserverController = /** @class */ (function () {\r\n /**\r\n * Creates a new instance of ResizeObserverController.\r\n *\r\n * @private\r\n */\r\n function ResizeObserverController() {\r\n /**\r\n * Indicates whether DOM listeners have been added.\r\n *\r\n * @private {boolean}\r\n */\r\n this.connected_ = false;\r\n /**\r\n * Tells that controller has subscribed for Mutation Events.\r\n *\r\n * @private {boolean}\r\n */\r\n this.mutationEventsAdded_ = false;\r\n /**\r\n * Keeps reference to the instance of MutationObserver.\r\n *\r\n * @private {MutationObserver}\r\n */\r\n this.mutationsObserver_ = null;\r\n /**\r\n * A list of connected observers.\r\n *\r\n * @private {Array}\r\n */\r\n this.observers_ = [];\r\n this.onTransitionEnd_ = this.onTransitionEnd_.bind(this);\r\n this.refresh = throttle(this.refresh.bind(this), REFRESH_DELAY);\r\n }\r\n /**\r\n * Adds observer to observers list.\r\n *\r\n * @param {ResizeObserverSPI} observer - Observer to be added.\r\n * @returns {void}\r\n */\r\n ResizeObserverController.prototype.addObserver = function (observer) {\r\n if (!~this.observers_.indexOf(observer)) {\r\n this.observers_.push(observer);\r\n }\r\n // Add listeners if they haven't been added yet.\r\n if (!this.connected_) {\r\n this.connect_();\r\n }\r\n };\r\n /**\r\n * Removes observer from observers list.\r\n *\r\n * @param {ResizeObserverSPI} observer - Observer to be removed.\r\n * @returns {void}\r\n */\r\n ResizeObserverController.prototype.removeObserver = function (observer) {\r\n var observers = this.observers_;\r\n var index = observers.indexOf(observer);\r\n // Remove observer if it's present in registry.\r\n if (~index) {\r\n observers.splice(index, 1);\r\n }\r\n // Remove listeners if controller has no connected observers.\r\n if (!observers.length 
&& this.connected_) {\r\n this.disconnect_();\r\n }\r\n };\r\n /**\r\n * Invokes the update of observers. It will continue running updates insofar\r\n * it detects changes.\r\n *\r\n * @returns {void}\r\n */\r\n ResizeObserverController.prototype.refresh = function () {\r\n var changesDetected = this.updateObservers_();\r\n // Continue running updates if changes have been detected as there might\r\n // be future ones caused by CSS transitions.\r\n if (changesDetected) {\r\n this.refresh();\r\n }\r\n };\r\n /**\r\n * Updates every observer from observers list and notifies them of queued\r\n * entries.\r\n *\r\n * @private\r\n * @returns {boolean} Returns \"true\" if any observer has detected changes in\r\n * dimensions of it's elements.\r\n */\r\n ResizeObserverController.prototype.updateObservers_ = function () {\r\n // Collect observers that have active observations.\r\n var activeObservers = this.observers_.filter(function (observer) {\r\n return observer.gatherActive(), observer.hasActive();\r\n });\r\n // Deliver notifications in a separate cycle in order to avoid any\r\n // collisions between observers, e.g. when multiple instances of\r\n // ResizeObserver are tracking the same element and the callback of one\r\n // of them changes content dimensions of the observed target. Sometimes\r\n // this may result in notifications being blocked for the rest of observers.\r\n activeObservers.forEach(function (observer) { return observer.broadcastActive(); });\r\n return activeObservers.length > 0;\r\n };\r\n /**\r\n * Initializes DOM listeners.\r\n *\r\n * @private\r\n * @returns {void}\r\n */\r\n ResizeObserverController.prototype.connect_ = function () {\r\n // Do nothing if running in a non-browser environment or if listeners\r\n // have been already added.\r\n if (!isBrowser || this.connected_) {\r\n return;\r\n }\r\n // Subscription to the \"Transitionend\" event is used as a workaround for\r\n // delayed transitions. 
This way it's possible to capture at least the\r\n // final state of an element.\r\n document.addEventListener('transitionend', this.onTransitionEnd_);\r\n window.addEventListener('resize', this.refresh);\r\n if (mutationObserverSupported) {\r\n this.mutationsObserver_ = new MutationObserver(this.refresh);\r\n this.mutationsObserver_.observe(document, {\r\n attributes: true,\r\n childList: true,\r\n characterData: true,\r\n subtree: true\r\n });\r\n }\r\n else {\r\n document.addEventListener('DOMSubtreeModified', this.refresh);\r\n this.mutationEventsAdded_ = true;\r\n }\r\n this.connected_ = true;\r\n };\r\n /**\r\n * Removes DOM listeners.\r\n *\r\n * @private\r\n * @returns {void}\r\n */\r\n ResizeObserverController.prototype.disconnect_ = function () {\r\n // Do nothing if running in a non-browser environment or if listeners\r\n // have been already removed.\r\n if (!isBrowser || !this.connected_) {\r\n return;\r\n }\r\n document.removeEventListener('transitionend', this.onTransitionEnd_);\r\n window.removeEventListener('resize', this.refresh);\r\n if (this.mutationsObserver_) {\r\n this.mutationsObserver_.disconnect();\r\n }\r\n if (this.mutationEventsAdded_) {\r\n document.removeEventListener('DOMSubtreeModified', this.refresh);\r\n }\r\n this.mutationsObserver_ = null;\r\n this.mutationEventsAdded_ = false;\r\n this.connected_ = false;\r\n };\r\n /**\r\n * \"Transitionend\" event handler.\r\n *\r\n * @private\r\n * @param {TransitionEvent} event\r\n * @returns {void}\r\n */\r\n ResizeObserverController.prototype.onTransitionEnd_ = function (_a) {\r\n var _b = _a.propertyName, propertyName = _b === void 0 ? 
'' : _b;\r\n // Detect whether transition may affect dimensions of an element.\r\n var isReflowProperty = transitionKeys.some(function (key) {\r\n return !!~propertyName.indexOf(key);\r\n });\r\n if (isReflowProperty) {\r\n this.refresh();\r\n }\r\n };\r\n /**\r\n * Returns instance of the ResizeObserverController.\r\n *\r\n * @returns {ResizeObserverController}\r\n */\r\n ResizeObserverController.getInstance = function () {\r\n if (!this.instance_) {\r\n this.instance_ = new ResizeObserverController();\r\n }\r\n return this.instance_;\r\n };\r\n /**\r\n * Holds reference to the controller's instance.\r\n *\r\n * @private {ResizeObserverController}\r\n */\r\n ResizeObserverController.instance_ = null;\r\n return ResizeObserverController;\r\n}());\n\n/**\r\n * Defines non-writable/enumerable properties of the provided target object.\r\n *\r\n * @param {Object} target - Object for which to define properties.\r\n * @param {Object} props - Properties to be defined.\r\n * @returns {Object} Target object.\r\n */\r\nvar defineConfigurable = (function (target, props) {\r\n for (var _i = 0, _a = Object.keys(props); _i < _a.length; _i++) {\r\n var key = _a[_i];\r\n Object.defineProperty(target, key, {\r\n value: props[key],\r\n enumerable: false,\r\n writable: false,\r\n configurable: true\r\n });\r\n }\r\n return target;\r\n});\n\n/**\r\n * Returns the global object associated with provided element.\r\n *\r\n * @param {Object} target\r\n * @returns {Object}\r\n */\r\nvar getWindowOf = (function (target) {\r\n // Assume that the element is an instance of Node, which means that it\r\n // has the \"ownerDocument\" property from which we can retrieve a\r\n // corresponding global object.\r\n var ownerGlobal = target && target.ownerDocument && target.ownerDocument.defaultView;\r\n // Return the local global object if it's not possible extract one from\r\n // provided element.\r\n return ownerGlobal || global$1;\r\n});\n\n// Placeholder of an empty content rectangle.\r\nvar 
emptyRect = createRectInit(0, 0, 0, 0);\r\n/**\r\n * Converts provided string to a number.\r\n *\r\n * @param {number|string} value\r\n * @returns {number}\r\n */\r\nfunction toFloat(value) {\r\n return parseFloat(value) || 0;\r\n}\r\n/**\r\n * Extracts borders size from provided styles.\r\n *\r\n * @param {CSSStyleDeclaration} styles\r\n * @param {...string} positions - Borders positions (top, right, ...)\r\n * @returns {number}\r\n */\r\nfunction getBordersSize(styles) {\r\n var positions = [];\r\n for (var _i = 1; _i < arguments.length; _i++) {\r\n positions[_i - 1] = arguments[_i];\r\n }\r\n return positions.reduce(function (size, position) {\r\n var value = styles['border-' + position + '-width'];\r\n return size + toFloat(value);\r\n }, 0);\r\n}\r\n/**\r\n * Extracts paddings sizes from provided styles.\r\n *\r\n * @param {CSSStyleDeclaration} styles\r\n * @returns {Object} Paddings box.\r\n */\r\nfunction getPaddings(styles) {\r\n var positions = ['top', 'right', 'bottom', 'left'];\r\n var paddings = {};\r\n for (var _i = 0, positions_1 = positions; _i < positions_1.length; _i++) {\r\n var position = positions_1[_i];\r\n var value = styles['padding-' + position];\r\n paddings[position] = toFloat(value);\r\n }\r\n return paddings;\r\n}\r\n/**\r\n * Calculates content rectangle of provided SVG element.\r\n *\r\n * @param {SVGGraphicsElement} target - Element content rectangle of which needs\r\n * to be calculated.\r\n * @returns {DOMRectInit}\r\n */\r\nfunction getSVGContentRect(target) {\r\n var bbox = target.getBBox();\r\n return createRectInit(0, 0, bbox.width, bbox.height);\r\n}\r\n/**\r\n * Calculates content rectangle of provided HTMLElement.\r\n *\r\n * @param {HTMLElement} target - Element for which to calculate the content rectangle.\r\n * @returns {DOMRectInit}\r\n */\r\nfunction getHTMLElementContentRect(target) {\r\n // Client width & height properties can't be\r\n // used exclusively as they provide rounded values.\r\n var clientWidth = 
target.clientWidth, clientHeight = target.clientHeight;\r\n // By this condition we can catch all non-replaced inline, hidden and\r\n // detached elements. Though elements with width & height properties less\r\n // than 0.5 will be discarded as well.\r\n //\r\n // Without it we would need to implement separate methods for each of\r\n // those cases and it's not possible to perform a precise and performance\r\n // effective test for hidden elements. E.g. even jQuery's ':visible' filter\r\n // gives wrong results for elements with width & height less than 0.5.\r\n if (!clientWidth && !clientHeight) {\r\n return emptyRect;\r\n }\r\n var styles = getWindowOf(target).getComputedStyle(target);\r\n var paddings = getPaddings(styles);\r\n var horizPad = paddings.left + paddings.right;\r\n var vertPad = paddings.top + paddings.bottom;\r\n // Computed styles of width & height are being used because they are the\r\n // only dimensions available to JS that contain non-rounded values. It could\r\n // be possible to utilize the getBoundingClientRect if only it's data wasn't\r\n // affected by CSS transformations let alone paddings, borders and scroll bars.\r\n var width = toFloat(styles.width), height = toFloat(styles.height);\r\n // Width & height include paddings and borders when the 'border-box' box\r\n // model is applied (except for IE).\r\n if (styles.boxSizing === 'border-box') {\r\n // Following conditions are required to handle Internet Explorer which\r\n // doesn't include paddings and borders to computed CSS dimensions.\r\n //\r\n // We can say that if CSS dimensions + paddings are equal to the \"client\"\r\n // properties then it's either IE, and thus we don't need to subtract\r\n // anything, or an element merely doesn't have paddings/borders styles.\r\n if (Math.round(width + horizPad) !== clientWidth) {\r\n width -= getBordersSize(styles, 'left', 'right') + horizPad;\r\n }\r\n if (Math.round(height + vertPad) !== clientHeight) {\r\n height -= 
getBordersSize(styles, 'top', 'bottom') + vertPad;\r\n }\r\n }\r\n // Following steps can't be applied to the document's root element as its\r\n // client[Width/Height] properties represent viewport area of the window.\r\n // Besides, it's as well not necessary as the itself neither has\r\n // rendered scroll bars nor it can be clipped.\r\n if (!isDocumentElement(target)) {\r\n // In some browsers (only in Firefox, actually) CSS width & height\r\n // include scroll bars size which can be removed at this step as scroll\r\n // bars are the only difference between rounded dimensions + paddings\r\n // and \"client\" properties, though that is not always true in Chrome.\r\n var vertScrollbar = Math.round(width + horizPad) - clientWidth;\r\n var horizScrollbar = Math.round(height + vertPad) - clientHeight;\r\n // Chrome has a rather weird rounding of \"client\" properties.\r\n // E.g. for an element with content width of 314.2px it sometimes gives\r\n // the client width of 315px and for the width of 314.7px it may give\r\n // 314px. And it doesn't happen all the time. 
So just ignore this delta\r\n // as a non-relevant.\r\n if (Math.abs(vertScrollbar) !== 1) {\r\n width -= vertScrollbar;\r\n }\r\n if (Math.abs(horizScrollbar) !== 1) {\r\n height -= horizScrollbar;\r\n }\r\n }\r\n return createRectInit(paddings.left, paddings.top, width, height);\r\n}\r\n/**\r\n * Checks whether provided element is an instance of the SVGGraphicsElement.\r\n *\r\n * @param {Element} target - Element to be checked.\r\n * @returns {boolean}\r\n */\r\nvar isSVGGraphicsElement = (function () {\r\n // Some browsers, namely IE and Edge, don't have the SVGGraphicsElement\r\n // interface.\r\n if (typeof SVGGraphicsElement !== 'undefined') {\r\n return function (target) { return target instanceof getWindowOf(target).SVGGraphicsElement; };\r\n }\r\n // If it's so, then check that element is at least an instance of the\r\n // SVGElement and that it has the \"getBBox\" method.\r\n // eslint-disable-next-line no-extra-parens\r\n return function (target) { return (target instanceof getWindowOf(target).SVGElement &&\r\n typeof target.getBBox === 'function'); };\r\n})();\r\n/**\r\n * Checks whether provided element is a document element ().\r\n *\r\n * @param {Element} target - Element to be checked.\r\n * @returns {boolean}\r\n */\r\nfunction isDocumentElement(target) {\r\n return target === getWindowOf(target).document.documentElement;\r\n}\r\n/**\r\n * Calculates an appropriate content rectangle for provided html or svg element.\r\n *\r\n * @param {Element} target - Element content rectangle of which needs to be calculated.\r\n * @returns {DOMRectInit}\r\n */\r\nfunction getContentRect(target) {\r\n if (!isBrowser) {\r\n return emptyRect;\r\n }\r\n if (isSVGGraphicsElement(target)) {\r\n return getSVGContentRect(target);\r\n }\r\n return getHTMLElementContentRect(target);\r\n}\r\n/**\r\n * Creates rectangle with an interface of the DOMRectReadOnly.\r\n * Spec: https://drafts.fxtf.org/geometry/#domrectreadonly\r\n *\r\n * @param {DOMRectInit} rectInit - Object 
with rectangle's x/y coordinates and dimensions.\r\n * @returns {DOMRectReadOnly}\r\n */\r\nfunction createReadOnlyRect(_a) {\r\n var x = _a.x, y = _a.y, width = _a.width, height = _a.height;\r\n // If DOMRectReadOnly is available use it as a prototype for the rectangle.\r\n var Constr = typeof DOMRectReadOnly !== 'undefined' ? DOMRectReadOnly : Object;\r\n var rect = Object.create(Constr.prototype);\r\n // Rectangle's properties are not writable and non-enumerable.\r\n defineConfigurable(rect, {\r\n x: x, y: y, width: width, height: height,\r\n top: y,\r\n right: x + width,\r\n bottom: height + y,\r\n left: x\r\n });\r\n return rect;\r\n}\r\n/**\r\n * Creates DOMRectInit object based on the provided dimensions and the x/y coordinates.\r\n * Spec: https://drafts.fxtf.org/geometry/#dictdef-domrectinit\r\n *\r\n * @param {number} x - X coordinate.\r\n * @param {number} y - Y coordinate.\r\n * @param {number} width - Rectangle's width.\r\n * @param {number} height - Rectangle's height.\r\n * @returns {DOMRectInit}\r\n */\r\nfunction createRectInit(x, y, width, height) {\r\n return { x: x, y: y, width: width, height: height };\r\n}\n\n/**\r\n * Class that is responsible for computations of the content rectangle of\r\n * provided DOM element and for keeping track of it's changes.\r\n */\r\nvar ResizeObservation = /** @class */ (function () {\r\n /**\r\n * Creates an instance of ResizeObservation.\r\n *\r\n * @param {Element} target - Element to be observed.\r\n */\r\n function ResizeObservation(target) {\r\n /**\r\n * Broadcasted width of content rectangle.\r\n *\r\n * @type {number}\r\n */\r\n this.broadcastWidth = 0;\r\n /**\r\n * Broadcasted height of content rectangle.\r\n *\r\n * @type {number}\r\n */\r\n this.broadcastHeight = 0;\r\n /**\r\n * Reference to the last observed content rectangle.\r\n *\r\n * @private {DOMRectInit}\r\n */\r\n this.contentRect_ = createRectInit(0, 0, 0, 0);\r\n this.target = target;\r\n }\r\n /**\r\n * Updates content rectangle and 
tells whether it's width or height properties\r\n * have changed since the last broadcast.\r\n *\r\n * @returns {boolean}\r\n */\r\n ResizeObservation.prototype.isActive = function () {\r\n var rect = getContentRect(this.target);\r\n this.contentRect_ = rect;\r\n return (rect.width !== this.broadcastWidth ||\r\n rect.height !== this.broadcastHeight);\r\n };\r\n /**\r\n * Updates 'broadcastWidth' and 'broadcastHeight' properties with a data\r\n * from the corresponding properties of the last observed content rectangle.\r\n *\r\n * @returns {DOMRectInit} Last observed content rectangle.\r\n */\r\n ResizeObservation.prototype.broadcastRect = function () {\r\n var rect = this.contentRect_;\r\n this.broadcastWidth = rect.width;\r\n this.broadcastHeight = rect.height;\r\n return rect;\r\n };\r\n return ResizeObservation;\r\n}());\n\nvar ResizeObserverEntry = /** @class */ (function () {\r\n /**\r\n * Creates an instance of ResizeObserverEntry.\r\n *\r\n * @param {Element} target - Element that is being observed.\r\n * @param {DOMRectInit} rectInit - Data of the element's content rectangle.\r\n */\r\n function ResizeObserverEntry(target, rectInit) {\r\n var contentRect = createReadOnlyRect(rectInit);\r\n // According to the specification following properties are not writable\r\n // and are also not enumerable in the native implementation.\r\n //\r\n // Property accessors are not being used as they'd require to define a\r\n // private WeakMap storage which may cause memory leaks in browsers that\r\n // don't support this type of collections.\r\n defineConfigurable(this, { target: target, contentRect: contentRect });\r\n }\r\n return ResizeObserverEntry;\r\n}());\n\nvar ResizeObserverSPI = /** @class */ (function () {\r\n /**\r\n * Creates a new instance of ResizeObserver.\r\n *\r\n * @param {ResizeObserverCallback} callback - Callback function that is invoked\r\n * when one of the observed elements changes it's content dimensions.\r\n * @param {ResizeObserverController} 
controller - Controller instance which\r\n * is responsible for the updates of observer.\r\n * @param {ResizeObserver} callbackCtx - Reference to the public\r\n * ResizeObserver instance which will be passed to callback function.\r\n */\r\n function ResizeObserverSPI(callback, controller, callbackCtx) {\r\n /**\r\n * Collection of resize observations that have detected changes in dimensions\r\n * of elements.\r\n *\r\n * @private {Array}\r\n */\r\n this.activeObservations_ = [];\r\n /**\r\n * Registry of the ResizeObservation instances.\r\n *\r\n * @private {Map}\r\n */\r\n this.observations_ = new MapShim();\r\n if (typeof callback !== 'function') {\r\n throw new TypeError('The callback provided as parameter 1 is not a function.');\r\n }\r\n this.callback_ = callback;\r\n this.controller_ = controller;\r\n this.callbackCtx_ = callbackCtx;\r\n }\r\n /**\r\n * Starts observing provided element.\r\n *\r\n * @param {Element} target - Element to be observed.\r\n * @returns {void}\r\n */\r\n ResizeObserverSPI.prototype.observe = function (target) {\r\n if (!arguments.length) {\r\n throw new TypeError('1 argument required, but only 0 present.');\r\n }\r\n // Do nothing if current environment doesn't have the Element interface.\r\n if (typeof Element === 'undefined' || !(Element instanceof Object)) {\r\n return;\r\n }\r\n if (!(target instanceof getWindowOf(target).Element)) {\r\n throw new TypeError('parameter 1 is not of type \"Element\".');\r\n }\r\n var observations = this.observations_;\r\n // Do nothing if element is already being observed.\r\n if (observations.has(target)) {\r\n return;\r\n }\r\n observations.set(target, new ResizeObservation(target));\r\n this.controller_.addObserver(this);\r\n // Force the update of observations.\r\n this.controller_.refresh();\r\n };\r\n /**\r\n * Stops observing provided element.\r\n *\r\n * @param {Element} target - Element to stop observing.\r\n * @returns {void}\r\n */\r\n ResizeObserverSPI.prototype.unobserve = function 
(target) {\r\n if (!arguments.length) {\r\n throw new TypeError('1 argument required, but only 0 present.');\r\n }\r\n // Do nothing if current environment doesn't have the Element interface.\r\n if (typeof Element === 'undefined' || !(Element instanceof Object)) {\r\n return;\r\n }\r\n if (!(target instanceof getWindowOf(target).Element)) {\r\n throw new TypeError('parameter 1 is not of type \"Element\".');\r\n }\r\n var observations = this.observations_;\r\n // Do nothing if element is not being observed.\r\n if (!observations.has(target)) {\r\n return;\r\n }\r\n observations.delete(target);\r\n if (!observations.size) {\r\n this.controller_.removeObserver(this);\r\n }\r\n };\r\n /**\r\n * Stops observing all elements.\r\n *\r\n * @returns {void}\r\n */\r\n ResizeObserverSPI.prototype.disconnect = function () {\r\n this.clearActive();\r\n this.observations_.clear();\r\n this.controller_.removeObserver(this);\r\n };\r\n /**\r\n * Collects observation instances the associated element of which has changed\r\n * it's content rectangle.\r\n *\r\n * @returns {void}\r\n */\r\n ResizeObserverSPI.prototype.gatherActive = function () {\r\n var _this = this;\r\n this.clearActive();\r\n this.observations_.forEach(function (observation) {\r\n if (observation.isActive()) {\r\n _this.activeObservations_.push(observation);\r\n }\r\n });\r\n };\r\n /**\r\n * Invokes initial callback function with a list of ResizeObserverEntry\r\n * instances collected from active resize observations.\r\n *\r\n * @returns {void}\r\n */\r\n ResizeObserverSPI.prototype.broadcastActive = function () {\r\n // Do nothing if observer doesn't have active observations.\r\n if (!this.hasActive()) {\r\n return;\r\n }\r\n var ctx = this.callbackCtx_;\r\n // Create ResizeObserverEntry instance for every active observation.\r\n var entries = this.activeObservations_.map(function (observation) {\r\n return new ResizeObserverEntry(observation.target, observation.broadcastRect());\r\n });\r\n 
this.callback_.call(ctx, entries, ctx);\r\n this.clearActive();\r\n };\r\n /**\r\n * Clears the collection of active observations.\r\n *\r\n * @returns {void}\r\n */\r\n ResizeObserverSPI.prototype.clearActive = function () {\r\n this.activeObservations_.splice(0);\r\n };\r\n /**\r\n * Tells whether observer has active observations.\r\n *\r\n * @returns {boolean}\r\n */\r\n ResizeObserverSPI.prototype.hasActive = function () {\r\n return this.activeObservations_.length > 0;\r\n };\r\n return ResizeObserverSPI;\r\n}());\n\n// Registry of internal observers. If WeakMap is not available use current shim\r\n// for the Map collection as it has all required methods and because WeakMap\r\n// can't be fully polyfilled anyway.\r\nvar observers = typeof WeakMap !== 'undefined' ? new WeakMap() : new MapShim();\r\n/**\r\n * ResizeObserver API. Encapsulates the ResizeObserver SPI implementation\r\n * exposing only those methods and properties that are defined in the spec.\r\n */\r\nvar ResizeObserver = /** @class */ (function () {\r\n /**\r\n * Creates a new instance of ResizeObserver.\r\n *\r\n * @param {ResizeObserverCallback} callback - Callback that is invoked when\r\n * dimensions of the observed elements change.\r\n */\r\n function ResizeObserver(callback) {\r\n if (!(this instanceof ResizeObserver)) {\r\n throw new TypeError('Cannot call a class as a function.');\r\n }\r\n if (!arguments.length) {\r\n throw new TypeError('1 argument required, but only 0 present.');\r\n }\r\n var controller = ResizeObserverController.getInstance();\r\n var observer = new ResizeObserverSPI(callback, controller, this);\r\n observers.set(this, observer);\r\n }\r\n return ResizeObserver;\r\n}());\r\n// Expose public methods of ResizeObserver.\r\n[\r\n 'observe',\r\n 'unobserve',\r\n 'disconnect'\r\n].forEach(function (method) {\r\n ResizeObserver.prototype[method] = function () {\r\n var _a;\r\n return (_a = observers.get(this))[method].apply(_a, arguments);\r\n };\r\n});\n\nvar index = 
(function () {\r\n // Export existing implementation if available.\r\n if (typeof global$1.ResizeObserver !== 'undefined') {\r\n return global$1.ResizeObserver;\r\n }\r\n return ResizeObserver;\r\n})();\n\nexport default index;\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport ResizeObserver from \"resize-observer-polyfill\"\nimport {\n NEVER,\n Observable,\n Subject,\n defer,\n filter,\n finalize,\n map,\n merge,\n of,\n shareReplay,\n startWith,\n switchMap,\n tap\n} from \"rxjs\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Element offset\n */\nexport interface ElementSize {\n width: number /* Element width */\n height: number /* Element height */\n}\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Resize observer entry subject\n */\nconst entry$ = new Subject()\n\n/**\n * Resize observer observable\n *\n * This observable will create a `ResizeObserver` on the first subscription\n * and will automatically terminate it when there are no more subscribers.\n * It's quite important to centralize observation in a single `ResizeObserver`,\n * as the performance difference can be quite dramatic, as the link shows.\n *\n * @see https://bit.ly/3iIYfEm - Google Groups on performance\n */\nconst observer$ = defer(() => of(\n new ResizeObserver(entries => {\n for (const entry of entries)\n entry$.next(entry)\n })\n))\n .pipe(\n switchMap(observer => merge(NEVER, of(observer))\n .pipe(\n finalize(() => observer.disconnect())\n )\n ),\n shareReplay(1)\n )\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve element size\n *\n * @param el - Element\n 
*\n * @returns Element size\n */\nexport function getElementSize(\n el: HTMLElement\n): ElementSize {\n return {\n width: el.offsetWidth,\n height: el.offsetHeight\n }\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Watch element size\n *\n * This function returns an observable that subscribes to a single internal\n * instance of `ResizeObserver` upon subscription, and emit resize events until\n * termination. Note that this function should not be called with the same\n * element twice, as the first unsubscription will terminate observation.\n *\n * Sadly, we can't use the `DOMRect` objects returned by the observer, because\n * we need the emitted values to be consistent with `getElementSize`, which will\n * return the used values (rounded) and not actual values (unrounded). Thus, we\n * use the `offset*` properties. See the linked GitHub issue.\n *\n * @see https://bit.ly/3m0k3he - GitHub issue\n *\n * @param el - Element\n *\n * @returns Element size observable\n */\nexport function watchElementSize(\n el: HTMLElement\n): Observable {\n return observer$\n .pipe(\n tap(observer => observer.observe(el)),\n switchMap(observer => entry$\n .pipe(\n filter(({ target }) => target === el),\n finalize(() => observer.unobserve(el)),\n map(() => getElementSize(el))\n )\n ),\n startWith(getElementSize(el))\n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or 
substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { ElementSize } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve element content size (= scroll width and height)\n *\n * @param el - Element\n *\n * @returns Element content size\n */\nexport function getElementContentSize(\n el: HTMLElement\n): ElementSize {\n return {\n width: el.scrollWidth,\n height: el.scrollHeight\n }\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n NEVER,\n Observable,\n Subject,\n defer,\n distinctUntilChanged,\n filter,\n finalize,\n map,\n merge,\n of,\n shareReplay,\n switchMap,\n tap\n} from \"rxjs\"\n\nimport {\n getElementContentSize,\n getElementSize,\n watchElementContentOffset\n} from \"~/browser\"\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Intersection observer entry subject\n */\nconst entry$ = new Subject()\n\n/**\n * Intersection observer observable\n *\n * This observable will create an `IntersectionObserver` on first subscription\n * and will automatically terminate it when there are no more subscribers.\n *\n * @see https://bit.ly/3iIYfEm - Google Groups on performance\n */\nconst observer$ = defer(() => of(\n new IntersectionObserver(entries => {\n for (const entry of entries)\n entry$.next(entry)\n }, {\n threshold: 1\n })\n))\n .pipe(\n switchMap(observer => merge(NEVER, of(observer))\n .pipe(\n finalize(() => observer.disconnect())\n )\n ),\n shareReplay(1)\n )\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch element visibility\n *\n * @param el - Element\n *\n * @returns Element visibility observable\n */\nexport function watchElementVisibility(\n el: HTMLElement\n): Observable {\n return observer$\n .pipe(\n tap(observer => observer.observe(el)),\n switchMap(observer => entry$\n .pipe(\n filter(({ target }) => target === el),\n finalize(() => observer.unobserve(el)),\n map(({ isIntersecting }) => isIntersecting)\n 
)\n )\n )\n}\n\n/**\n * Watch element boundary\n *\n * This function returns an observable which emits whether the bottom content\n * boundary (= scroll offset) of an element is within a certain threshold.\n *\n * @param el - Element\n * @param threshold - Threshold\n *\n * @returns Element boundary observable\n */\nexport function watchElementBoundary(\n el: HTMLElement, threshold = 16\n): Observable {\n return watchElementContentOffset(el)\n .pipe(\n map(({ y }) => {\n const visible = getElementSize(el)\n const content = getElementContentSize(el)\n return y >= (\n content.height - visible.height - threshold\n )\n }),\n distinctUntilChanged()\n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n fromEvent,\n map,\n startWith\n} from \"rxjs\"\n\nimport { getElement } from \"../element\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Toggle\n */\nexport type Toggle =\n | \"drawer\" /* Toggle for drawer */\n | \"search\" /* Toggle for search */\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Toggle map\n */\nconst toggles: Record = {\n drawer: getElement(\"[data-md-toggle=drawer]\"),\n search: getElement(\"[data-md-toggle=search]\")\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve the value of a toggle\n *\n * @param name - Toggle\n *\n * @returns Toggle value\n */\nexport function getToggle(name: Toggle): boolean {\n return toggles[name].checked\n}\n\n/**\n * Set toggle\n *\n * Simulating a click event seems to be the most cross-browser compatible way\n * of changing the value while also emitting a `change` event. 
Before, Material\n * used `CustomEvent` to programmatically change the value of a toggle, but this\n * is a much simpler and cleaner solution which doesn't require a polyfill.\n *\n * @param name - Toggle\n * @param value - Toggle value\n */\nexport function setToggle(name: Toggle, value: boolean): void {\n if (toggles[name].checked !== value)\n toggles[name].click()\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Watch toggle\n *\n * @param name - Toggle\n *\n * @returns Toggle value observable\n */\nexport function watchToggle(name: Toggle): Observable {\n const el = toggles[name]\n return fromEvent(el, \"change\")\n .pipe(\n map(() => el.checked),\n startWith(el.checked)\n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n filter,\n fromEvent,\n map,\n share\n} from \"rxjs\"\n\nimport { getActiveElement } from \"../element\"\nimport { getToggle } from \"../toggle\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Keyboard mode\n */\nexport type KeyboardMode =\n | \"global\" /* Global */\n | \"search\" /* Search is open */\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Keyboard\n */\nexport interface Keyboard {\n mode: KeyboardMode /* Keyboard mode */\n type: string /* Key type */\n claim(): void /* Key claim */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Check whether an element may receive keyboard input\n *\n * @param el - Element\n * @param type - Key type\n *\n * @returns Test result\n */\nfunction isSusceptibleToKeyboard(\n el: HTMLElement, type: string\n): boolean {\n switch (el.constructor) {\n\n /* Input elements */\n case HTMLInputElement:\n /* @ts-expect-error - omit unnecessary type cast */\n if (el.type === \"radio\")\n return /^Arrow/.test(type)\n else\n return true\n\n /* Select element and textarea */\n case HTMLSelectElement:\n case HTMLTextAreaElement:\n return true\n\n /* Everything else */\n default:\n return el.isContentEditable\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch 
keyboard\n *\n * @returns Keyboard observable\n */\nexport function watchKeyboard(): Observable {\n return fromEvent(window, \"keydown\")\n .pipe(\n filter(ev => !(ev.metaKey || ev.ctrlKey)),\n map(ev => ({\n mode: getToggle(\"search\") ? \"search\" : \"global\",\n type: ev.key,\n claim() {\n ev.preventDefault()\n ev.stopPropagation()\n }\n } as Keyboard)),\n filter(({ mode, type }) => {\n if (mode === \"global\") {\n const active = getActiveElement()\n if (typeof active !== \"undefined\")\n return !isSusceptibleToKeyboard(active, type)\n }\n return true\n }),\n share()\n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { Subject } from \"rxjs\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve location\n *\n * This function returns a `URL` object (and not `Location`) to normalize the\n * typings across the application. Furthermore, locations need to be tracked\n * without setting them and `Location` is a singleton which represents the\n * current location.\n *\n * @returns URL\n */\nexport function getLocation(): URL {\n return new URL(location.href)\n}\n\n/**\n * Set location\n *\n * @param url - URL to change to\n */\nexport function setLocation(url: URL): void {\n location.href = url.href\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Watch location\n *\n * @returns Location subject\n */\nexport function watchLocation(): Subject {\n return new Subject()\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, 
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { JSX as JSXInternal } from \"preact\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * HTML attributes\n */\ntype Attributes =\n & JSXInternal.HTMLAttributes\n & JSXInternal.SVGAttributes\n & Record\n\n/**\n * Child element\n */\ntype Child =\n | HTMLElement\n | Text\n | string\n | number\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Append a child node to an element\n *\n * @param el - Element\n * @param child - Child node(s)\n */\nfunction appendChild(el: HTMLElement, child: Child | Child[]): void {\n\n /* Handle primitive types (including raw HTML) */\n if (typeof child === \"string\" || typeof child === \"number\") {\n el.innerHTML += child.toString()\n\n /* Handle nodes */\n } else if (child instanceof Node) {\n el.appendChild(child)\n\n /* Handle nested children */\n } else if (Array.isArray(child)) {\n for (const node of child)\n appendChild(el, node)\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * JSX factory\n *\n * @template T - Element type\n *\n * @param tag - HTML tag\n * @param attributes - HTML attributes\n * @param children - Child elements\n *\n * @returns Element\n */\nexport function h(\n tag: T, 
attributes?: Attributes | null, ...children: Child[]\n): HTMLElementTagNameMap[T]\n\nexport function h(\n tag: string, attributes?: Attributes | null, ...children: Child[]\n): T\n\nexport function h(\n tag: string, attributes?: Attributes | null, ...children: Child[]\n): T {\n const el = document.createElement(tag)\n\n /* Set attributes, if any */\n if (attributes)\n for (const attr of Object.keys(attributes))\n if (typeof attributes[attr] !== \"boolean\")\n el.setAttribute(attr, attributes[attr])\n else if (attributes[attr])\n el.setAttribute(attr, \"\")\n\n /* Append child nodes */\n for (const child of children)\n appendChild(el, child)\n\n /* Return element */\n return el as T\n}\n\n/* ----------------------------------------------------------------------------\n * Namespace\n * ------------------------------------------------------------------------- */\n\nexport declare namespace h {\n namespace JSX {\n type Element = HTMLElement\n type IntrinsicElements = JSXInternal.IntrinsicElements\n }\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Truncate a string after the given number of characters\n *\n * This is not a very reasonable approach, since the summaries kind of suck.\n * It would be better to create something more intelligent, highlighting the\n * search occurrences and making a better summary out of it, but this note was\n * written three years ago, so who knows if we'll ever fix it.\n *\n * @param value - Value to be truncated\n * @param n - Number of characters\n *\n * @returns Truncated value\n */\nexport function truncate(value: string, n: number): string {\n let i = n\n if (value.length > i) {\n while (value[i] !== \" \" && --i > 0) { /* keep eating */ }\n return `${value.substring(0, i)}...`\n }\n return value\n}\n\n/**\n * Round a number for display with repository facts\n *\n * This is a reverse-engineered version of GitHub's weird rounding algorithm\n * for stars, forks and all other numbers. 
While all numbers below `1,000` are\n * returned as-is, bigger numbers are converted to fixed numbers:\n *\n * - `1,049` => `1k`\n * - `1,050` => `1.1k`\n * - `1,949` => `1.9k`\n * - `1,950` => `2k`\n *\n * @param value - Original value\n *\n * @returns Rounded value\n */\nexport function round(value: number): string {\n if (value > 999) {\n const digits = +((value - 950) % 1000 > 99)\n return `${((value + 0.000001) / 1000).toFixed(digits)}k`\n } else {\n return value.toString()\n }\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n filter,\n fromEvent,\n map,\n shareReplay,\n startWith\n} from \"rxjs\"\n\nimport { getOptionalElement } from \"~/browser\"\nimport { h } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve location hash\n *\n * @returns Location hash\n */\nexport function getLocationHash(): string {\n return location.hash.substring(1)\n}\n\n/**\n * Set location hash\n *\n * Setting a new fragment identifier via `location.hash` will have no effect\n * if the value doesn't change. When a new fragment identifier is set, we want\n * the browser to target the respective element at all times, which is why we\n * use this dirty little trick.\n *\n * @param hash - Location hash\n */\nexport function setLocationHash(hash: string): void {\n const el = h(\"a\", { href: hash })\n el.addEventListener(\"click\", ev => ev.stopPropagation())\n el.click()\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Watch location hash\n *\n * @returns Location hash observable\n */\nexport function watchLocationHash(): Observable {\n return fromEvent(window, \"hashchange\")\n .pipe(\n map(getLocationHash),\n startWith(getLocationHash()),\n filter(hash => hash.length > 0),\n shareReplay(1)\n )\n}\n\n/**\n * Watch location target\n *\n * @returns Location target observable\n */\nexport function watchLocationTarget(): Observable {\n return watchLocationHash()\n .pipe(\n map(id => getOptionalElement(`[id=\"${id}\"]`)!),\n filter(el => typeof el !== \"undefined\")\n )\n}\n", "/*\n * Copyright (c) 
2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n EMPTY,\n Observable,\n fromEvent,\n fromEventPattern,\n mapTo,\n merge,\n startWith,\n switchMap\n} from \"rxjs\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch media query\n *\n * Note that although `MediaQueryList.addListener` is deprecated we have to\n * use it, because it's the only way to ensure proper downward compatibility.\n *\n * @see https://bit.ly/3dUBH2m - GitHub issue\n *\n * @param query - Media query\n *\n * @returns Media observable\n */\nexport function watchMedia(query: string): Observable {\n const media = matchMedia(query)\n return fromEventPattern(next => (\n media.addListener(() => next(media.matches))\n ))\n .pipe(\n startWith(media.matches)\n )\n}\n\n/**\n * Watch print mode\n 
*\n * @returns Print observable\n */\nexport function watchPrint(): Observable {\n const media = matchMedia(\"print\")\n return merge(\n fromEvent(window, \"beforeprint\").pipe(mapTo(true)),\n fromEvent(window, \"afterprint\").pipe(mapTo(false))\n )\n .pipe(\n startWith(media.matches)\n )\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Toggle an observable with a media observable\n *\n * @template T - Data type\n *\n * @param query$ - Media observable\n * @param factory - Observable factory\n *\n * @returns Toggled observable\n */\nexport function at(\n query$: Observable, factory: () => Observable\n): Observable {\n return query$\n .pipe(\n switchMap(active => active ? factory() : EMPTY)\n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n EMPTY,\n Observable,\n catchError,\n filter,\n from,\n map,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch the given URL\n *\n * If the request fails (e.g. when dispatched from `file://` locations), the\n * observable will complete without emitting a value.\n *\n * @param url - Request URL\n * @param options - Options\n *\n * @returns Response observable\n */\nexport function request(\n url: URL | string, options: RequestInit = { credentials: \"same-origin\" }\n): Observable {\n return from(fetch(`${url}`, options))\n .pipe(\n filter(res => res.status === 200),\n catchError(() => EMPTY)\n )\n}\n\n/**\n * Fetch JSON from the given URL\n *\n * @template T - Data type\n *\n * @param url - Request URL\n * @param options - Options\n *\n * @returns Data observable\n */\nexport function requestJSON(\n url: URL | string, options?: RequestInit\n): Observable {\n return request(url, options)\n .pipe(\n switchMap(res => res.json()),\n shareReplay(1)\n )\n}\n\n/**\n * Fetch XML from the given URL\n *\n * @param url - Request URL\n * @param options - Options\n *\n * @returns Data observable\n */\nexport function requestXML(\n url: URL | string, options?: RequestInit\n): Observable {\n const dom = new DOMParser()\n return request(url, options)\n .pipe(\n switchMap(res => res.text()),\n map(res => dom.parseFromString(res, \"text/xml\")),\n shareReplay(1)\n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of 
this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n fromEvent,\n map,\n merge,\n startWith\n} from \"rxjs\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Viewport offset\n */\nexport interface ViewportOffset {\n x: number /* Horizontal offset */\n y: number /* Vertical offset */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve viewport offset\n *\n * On iOS Safari, viewport offset can be negative due to overflow scrolling.\n * As this may induce strange behaviors downstream, we'll just limit it to 0.\n *\n * @returns Viewport offset\n */\nexport function getViewportOffset(): ViewportOffset {\n return {\n x: Math.max(0, scrollX),\n y: Math.max(0, scrollY)\n }\n}\n\n/* 
------------------------------------------------------------------------- */\n\n/**\n * Watch viewport offset\n *\n * @returns Viewport offset observable\n */\nexport function watchViewportOffset(): Observable {\n return merge(\n fromEvent(window, \"scroll\", { passive: true }),\n fromEvent(window, \"resize\", { passive: true })\n )\n .pipe(\n map(getViewportOffset),\n startWith(getViewportOffset())\n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n fromEvent,\n map,\n startWith\n} from \"rxjs\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Viewport size\n */\nexport interface ViewportSize {\n width: number /* Viewport width */\n height: number /* Viewport height */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve viewport size\n *\n * @returns Viewport size\n */\nexport function getViewportSize(): ViewportSize {\n return {\n width: innerWidth,\n height: innerHeight\n }\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Watch viewport size\n *\n * @returns Viewport size observable\n */\nexport function watchViewportSize(): Observable {\n return fromEvent(window, \"resize\", { passive: true })\n .pipe(\n map(getViewportSize),\n startWith(getViewportSize())\n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial 
portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n combineLatest,\n map,\n shareReplay\n} from \"rxjs\"\n\nimport {\n ViewportOffset,\n watchViewportOffset\n} from \"../offset\"\nimport {\n ViewportSize,\n watchViewportSize\n} from \"../size\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Viewport\n */\nexport interface Viewport {\n offset: ViewportOffset /* Viewport offset */\n size: ViewportSize /* Viewport size */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch viewport\n *\n * @returns Viewport observable\n */\nexport function watchViewport(): Observable {\n return combineLatest([\n watchViewportOffset(),\n watchViewportSize()\n ])\n .pipe(\n map(([offset, size]) => ({ offset, size })),\n shareReplay(1)\n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, 
subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n combineLatest,\n distinctUntilKeyChanged,\n map\n} from \"rxjs\"\n\nimport { Header } from \"~/components\"\n\nimport { getElementOffset } from \"../../element\"\nimport { Viewport } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n viewport$: Observable /* Viewport observable */\n header$: Observable
/* Header observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch viewport relative to element\n *\n * @param el - Element\n * @param options - Options\n *\n * @returns Viewport observable\n */\nexport function watchViewportAt(\n el: HTMLElement, { viewport$, header$ }: WatchOptions\n): Observable {\n const size$ = viewport$\n .pipe(\n distinctUntilKeyChanged(\"size\")\n )\n\n /* Compute element offset */\n const offset$ = combineLatest([size$, header$])\n .pipe(\n map(() => getElementOffset(el))\n )\n\n /* Compute relative viewport, return hot observable */\n return combineLatest([header$, viewport$, offset$])\n .pipe(\n map(([{ height }, { offset, size }, { x, y }]) => ({\n offset: {\n x: offset.x - x,\n y: offset.y - y + height\n },\n size\n }))\n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n Subject,\n fromEvent,\n map,\n share,\n switchMapTo,\n tap,\n throttle\n} from \"rxjs\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Worker message\n */\nexport interface WorkerMessage {\n type: unknown /* Message type */\n data?: unknown /* Message data */\n}\n\n/**\n * Worker handler\n *\n * @template T - Message type\n */\nexport interface WorkerHandler<\n T extends WorkerMessage\n> {\n tx$: Subject /* Message transmission subject */\n rx$: Observable /* Message receive observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n *\n * @template T - Worker message type\n */\ninterface WatchOptions {\n tx$: Observable /* Message transmission observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch a web worker\n *\n * This function returns an observable that sends all values emitted by the\n * message observable to the web worker. Web worker communication is expected\n * to be bidirectional (request-response) and synchronous. 
Messages that are\n * emitted during a pending request are throttled, the last one is emitted.\n *\n * @param worker - Web worker\n * @param options - Options\n *\n * @returns Worker message observable\n */\nexport function watchWorker(\n worker: Worker, { tx$ }: WatchOptions\n): Observable {\n\n /* Intercept messages from worker-like objects */\n const rx$ = fromEvent(worker, \"message\")\n .pipe(\n map(({ data }) => data as T)\n )\n\n /* Send and receive messages, return hot observable */\n return tx$\n .pipe(\n throttle(() => rx$, { leading: true, trailing: true }),\n tap(message => worker.postMessage(message)),\n switchMapTo(rx$),\n share()\n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { getElement, getLocation } from \"~/browser\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Feature flag\n */\nexport type Flag =\n | \"content.code.annotate\" /* Code annotations */\n | \"header.autohide\" /* Hide header */\n | \"navigation.expand\" /* Automatic expansion */\n | \"navigation.indexes\" /* Section pages */\n | \"navigation.instant\" /* Instant loading */\n | \"navigation.sections\" /* Section navigation */\n | \"navigation.tabs\" /* Tabs navigation */\n | \"navigation.tabs.sticky\" /* Tabs navigation (sticky) */\n | \"navigation.top\" /* Back-to-top button */\n | \"navigation.tracking\" /* Anchor tracking */\n | \"search.highlight\" /* Search highlighting */\n | \"search.share\" /* Search sharing */\n | \"search.suggest\" /* Search suggestions */\n | \"toc.integrate\" /* Integrated table of contents */\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Translation\n */\nexport type Translation =\n | \"clipboard.copy\" /* Copy to clipboard */\n | \"clipboard.copied\" /* Copied to clipboard */\n | \"search.config.lang\" /* Search language */\n | \"search.config.pipeline\" /* Search pipeline */\n | \"search.config.separator\" /* Search separator */\n | \"search.placeholder\" /* Search */\n | \"search.result.placeholder\" /* Type to start searching */\n | \"search.result.none\" /* No matching documents */\n | \"search.result.one\" /* 1 matching document */\n | \"search.result.other\" /* # matching documents */\n | \"search.result.more.one\" /* 1 more on this page */\n | 
\"search.result.more.other\" /* # more on this page */\n | \"search.result.term.missing\" /* Missing */\n | \"select.version.title\" /* Version selector */\n\n/**\n * Translations\n */\nexport type Translations = Record\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Versioning\n */\nexport interface Versioning {\n provider: \"mike\" /* Version provider */\n default?: string /* Default version */\n}\n\n/**\n * Configuration\n */\nexport interface Config {\n base: string /* Base URL */\n features: Flag[] /* Feature flags */\n translations: Translations /* Translations */\n search: string /* Search worker URL */\n version?: Versioning /* Versioning */\n}\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve global configuration and make base URL absolute\n */\nconst script = getElement(\"#__config\")\nconst config: Config = JSON.parse(script.textContent!)\nconfig.base = `${new URL(config.base, getLocation())}`\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve global configuration\n *\n * @returns Global configuration\n */\nexport function configuration(): Config {\n return config\n}\n\n/**\n * Check whether a feature flag is enabled\n *\n * @param flag - Feature flag\n *\n * @returns Test result\n */\nexport function feature(flag: Flag): boolean {\n return config.features.includes(flag)\n}\n\n/**\n * Retrieve the translation for the given key\n *\n * @param key - Key to be translated\n * @param value - Positional value, if any\n *\n * @returns Translation\n */\nexport function translation(\n key: Translation, value?: string | number\n): string {\n return typeof value !== \"undefined\"\n ? 
config.translations[key].replace(\"#\", value.toString())\n : config.translations[key]\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { getElement, getElements } from \"~/browser\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Component type\n */\nexport type ComponentType =\n | \"announce\" /* Announcement bar */\n | \"container\" /* Container */\n | \"content\" /* Content */\n | \"dialog\" /* Dialog */\n | \"header\" /* Header */\n | \"header-title\" /* Header title */\n | \"header-topic\" /* Header topic */\n | \"main\" /* Main area */\n | \"outdated\" /* Version warning */\n | \"palette\" /* Color palette */\n | \"search\" /* Search */\n | \"search-query\" /* Search input */\n | \"search-result\" /* Search results */\n | 
\"search-share\" /* Search sharing */\n | \"search-suggest\" /* Search suggestions */\n | \"sidebar\" /* Sidebar */\n | \"skip\" /* Skip link */\n | \"source\" /* Repository information */\n | \"tabs\" /* Navigation tabs */\n | \"toc\" /* Table of contents */\n | \"top\" /* Back-to-top button */\n\n/**\n * Component\n *\n * @template T - Component type\n * @template U - Reference type\n */\nexport type Component<\n T extends {} = {},\n U extends HTMLElement = HTMLElement\n> =\n T & {\n ref: U /* Component reference */\n }\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Component type map\n */\ninterface ComponentTypeMap {\n \"announce\": HTMLElement /* Announcement bar */\n \"container\": HTMLElement /* Container */\n \"content\": HTMLElement /* Content */\n \"dialog\": HTMLElement /* Dialog */\n \"header\": HTMLElement /* Header */\n \"header-title\": HTMLElement /* Header title */\n \"header-topic\": HTMLElement /* Header topic */\n \"main\": HTMLElement /* Main area */\n \"outdated\": HTMLElement /* Version warning */\n \"palette\": HTMLElement /* Color palette */\n \"search\": HTMLElement /* Search */\n \"search-query\": HTMLInputElement /* Search input */\n \"search-result\": HTMLElement /* Search results */\n \"search-share\": HTMLAnchorElement /* Search sharing */\n \"search-suggest\": HTMLElement /* Search suggestions */\n \"sidebar\": HTMLElement /* Sidebar */\n \"skip\": HTMLAnchorElement /* Skip link */\n \"source\": HTMLAnchorElement /* Repository information */\n \"tabs\": HTMLElement /* Navigation tabs */\n \"toc\": HTMLElement /* Table of contents */\n \"top\": HTMLAnchorElement /* Back-to-top button */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve the 
element for a given component or throw a reference error\n *\n * @template T - Component type\n *\n * @param type - Component type\n * @param node - Node of reference\n *\n * @returns Element\n */\nexport function getComponentElement(\n type: T, node: ParentNode = document\n): ComponentTypeMap[T] {\n return getElement(`[data-md-component=${type}]`, node)\n}\n\n/**\n * Retrieve all elements for a given component\n *\n * @template T - Component type\n *\n * @param type - Component type\n * @param node - Node of reference\n *\n * @returns Elements\n */\nexport function getComponentElements(\n type: T, node: ParentNode = document\n): ComponentTypeMap[T][] {\n return getElements(`[data-md-component=${type}]`, node)\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport ClipboardJS from \"clipboard\"\nimport {\n EMPTY,\n Observable,\n Subject,\n defer,\n distinctUntilChanged,\n distinctUntilKeyChanged,\n finalize,\n map,\n mergeWith,\n switchMap,\n takeLast,\n takeUntil,\n tap\n} from \"rxjs\"\n\nimport { feature } from \"~/_\"\nimport {\n getElementContentSize,\n watchElementSize\n} from \"~/browser\"\nimport { renderClipboardButton } from \"~/templates\"\n\nimport { Component } from \"../../_\"\nimport {\n Annotation,\n mountAnnotationList\n} from \"../annotation\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Code block\n */\nexport interface CodeBlock {\n scrollable: boolean /* Code block overflows */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n print$: Observable /* Media print observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Global sequence number for Clipboard.js integration\n */\nlet sequence = 0\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Find candidate list element directly following a code block\n *\n * @param el - Code block element\n *\n * @returns List element or nothing\n */\nfunction 
findCandidateList(el: HTMLElement): HTMLElement | undefined {\n if (el.nextElementSibling) {\n const sibling = el.nextElementSibling as HTMLElement\n if (sibling.tagName === \"OL\")\n return sibling\n\n /* Skip empty paragraphs - see https://bit.ly/3r4ZJ2O */\n else if (sibling.tagName === \"P\" && !sibling.children.length)\n return findCandidateList(sibling)\n }\n\n /* Everything else */\n return undefined\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch code block\n *\n * This function monitors size changes of the viewport, as well as switches of\n * content tabs with embedded code blocks, as both may trigger overflow.\n *\n * @param el - Code block element\n *\n * @returns Code block observable\n */\nexport function watchCodeBlock(\n el: HTMLElement\n): Observable {\n return watchElementSize(el)\n .pipe(\n map(({ width }) => {\n const content = getElementContentSize(el)\n return {\n scrollable: content.width > width\n }\n }),\n distinctUntilKeyChanged(\"scrollable\")\n )\n}\n\n/**\n * Mount code block\n *\n * This function ensures that an overflowing code block is focusable through\n * keyboard, so it can be scrolled without a mouse to improve on accessibility.\n * Furthermore, if code annotations are enabled, they are mounted if and only\n * if the code block is currently visible, e.g., not in a hidden content tab.\n *\n * @param el - Code block element\n * @param options - Options\n *\n * @returns Code block and annotation component observable\n */\nexport function mountCodeBlock(\n el: HTMLElement, options: MountOptions\n): Observable> {\n const { matches: hover } = matchMedia(\"(hover)\")\n return defer(() => {\n const push$ = new Subject()\n push$.subscribe(({ scrollable }) => {\n if (scrollable && hover)\n el.setAttribute(\"tabindex\", \"0\")\n else\n el.removeAttribute(\"tabindex\")\n })\n\n /* Render button 
for Clipboard.js integration */\n if (ClipboardJS.isSupported()) {\n const parent = el.closest(\"pre\")!\n parent.id = `__code_${++sequence}`\n parent.insertBefore(\n renderClipboardButton(parent.id),\n el\n )\n }\n\n /* Handle code annotations */\n const container = el.closest([\n \":not(td):not(.code) > .highlight\",\n \".highlighttable\"\n ].join(\", \"))\n if (container instanceof HTMLElement) {\n const list = findCandidateList(container)\n\n /* Mount code annotations, if enabled */\n if (typeof list !== \"undefined\" && (\n container.classList.contains(\"annotate\") ||\n feature(\"content.code.annotate\")\n )) {\n const annotations$ = mountAnnotationList(list, el, options)\n\n /* Create and return component */\n return watchCodeBlock(el)\n .pipe(\n tap(state => push$.next(state)),\n finalize(() => push$.complete()),\n map(state => ({ ref: el, ...state })),\n mergeWith(watchElementSize(container)\n .pipe(\n takeUntil(push$.pipe(takeLast(1))),\n map(({ width, height }) => width && height),\n distinctUntilChanged(),\n switchMap(active => active ? 
annotations$ : EMPTY)\n )\n )\n )\n }\n }\n\n /* Create and return component */\n return watchCodeBlock(el)\n .pipe(\n tap(state => push$.next(state)),\n finalize(() => push$.complete()),\n map(state => ({ ref: el, ...state }))\n )\n })\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { h } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render an empty annotation\n *\n * @param id - Annotation identifier\n *\n * @returns Element\n */\nexport function renderAnnotation(id: number): HTMLElement {\n return (\n \n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { translation } from \"~/_\"\nimport { h } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render a 'copy-to-clipboard' button\n *\n * @param id - Unique identifier\n *\n * @returns Element\n */\nexport function renderClipboardButton(id: string): HTMLElement {\n return (\n code`}\n >\n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { ComponentChild } from \"preact\"\n\nimport { feature, translation } from \"~/_\"\nimport {\n SearchDocument,\n SearchMetadata,\n SearchResultItem\n} from \"~/integrations/search\"\nimport { h, truncate } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Render flag\n */\nconst enum Flag {\n TEASER = 1, /* Render teaser */\n PARENT = 2 /* Render as parent */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper function\n * ------------------------------------------------------------------------- */\n\n/**\n * Render a search document\n *\n * @param document - Search document\n * @param flag - Render flags\n *\n * @returns Element\n */\nfunction renderSearchDocument(\n document: SearchDocument & SearchMetadata, flag: Flag\n): HTMLElement {\n const parent = flag & Flag.PARENT\n const teaser = flag & Flag.TEASER\n\n /* Render missing query terms */\n const missing = Object.keys(document.terms)\n .filter(key => !document.terms[key])\n .reduce((list, key) => [\n ...list, {key}, \" \"\n ], [])\n .slice(0, -1)\n\n /* Assemble query string for highlighting */\n const url = new URL(document.location)\n if (feature(\"search.highlight\"))\n url.searchParams.set(\"h\", Object.entries(document.terms)\n .filter(([, match]) => match)\n .reduce((highlight, [value]) => `${highlight} ${value}`.trim(), \"\")\n )\n\n /* Render article or section, depending on flags */\n return (\n \n \n {parent > 0 &&
}\n

{document.title}

\n {teaser > 0 && document.text.length > 0 &&\n

\n {truncate(document.text, 320)}\n

\n }\n {teaser > 0 && missing.length > 0 &&\n

\n {translation(\"search.result.term.missing\")}: {...missing}\n

\n }\n \n
\n )\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render a search result\n *\n * @param result - Search result\n *\n * @returns Element\n */\nexport function renderSearchResultItem(\n result: SearchResultItem\n): HTMLElement {\n const threshold = result[0].score\n const docs = [...result]\n\n /* Find and extract parent article */\n const parent = docs.findIndex(doc => !doc.location.includes(\"#\"))\n const [article] = docs.splice(parent, 1)\n\n /* Determine last index above threshold */\n let index = docs.findIndex(doc => doc.score < threshold)\n if (index === -1)\n index = docs.length\n\n /* Partition sections */\n const best = docs.slice(0, index)\n const more = docs.slice(index)\n\n /* Render children */\n const children = [\n renderSearchDocument(article, Flag.PARENT | +(!parent && index === 0)),\n ...best.map(section => renderSearchDocument(section, Flag.TEASER)),\n ...more.length ? [\n
\n \n {more.length > 0 && more.length === 1\n ? translation(\"search.result.more.one\")\n : translation(\"search.result.more.other\", more.length)\n }\n \n {...more.map(section => renderSearchDocument(section, Flag.TEASER))}\n
\n ] : []\n ]\n\n /* Render search result */\n return (\n
  • \n {children}\n
  • \n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { SourceFacts } from \"~/components\"\nimport { h, round } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render repository facts\n *\n * @param facts - Repository facts\n *\n * @returns Element\n */\nexport function renderSourceFacts(facts: SourceFacts): HTMLElement {\n return (\n
      \n {Object.entries(facts).map(([key, value]) => (\n
    • \n {typeof value === \"number\" ? round(value) : value}\n
    • \n ))}\n
    \n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { h } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render a table inside a wrapper to improve scrolling on mobile\n *\n * @param table - Table element\n *\n * @returns Element\n */\nexport function renderTable(table: HTMLElement): HTMLElement {\n return (\n
    \n
    \n {table}\n
    \n
    \n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { configuration, translation } from \"~/_\"\nimport { h } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Version\n */\nexport interface Version {\n version: string /* Version identifier */\n title: string /* Version title */\n aliases: string[] /* Version aliases */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render a version\n *\n * @param version - Version\n *\n * @returns Element\n */\nfunction renderVersion(version: Version): HTMLElement {\n const config = configuration()\n\n /* Ensure trailing 
slash, see https://bit.ly/3rL5u3f */\n const url = new URL(`../${version.version}/`, config.base)\n return (\n
  • \n \n {version.title}\n \n
  • \n )\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render a version selector\n *\n * @param versions - Versions\n * @param active - Active version\n *\n * @returns Element\n */\nexport function renderVersionSelector(\n versions: Version[], active: Version\n): HTMLElement {\n return (\n
    \n \n {active.title}\n \n
      \n {versions.map(renderVersion)}\n
    \n
    \n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n EMPTY,\n Observable,\n Subject,\n animationFrameScheduler,\n combineLatest,\n defer,\n finalize,\n fromEvent,\n map,\n switchMap,\n take,\n tap,\n throttleTime\n} from \"rxjs\"\n\nimport {\n ElementOffset,\n getElement,\n getElementSize,\n watchElementContentOffset,\n watchElementFocus,\n watchElementOffset\n} from \"~/browser\"\n\nimport { Component } from \"../../../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Annotation\n */\nexport interface Annotation {\n active: boolean /* Annotation is active */\n offset: ElementOffset /* Annotation offset */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * 
------------------------------------------------------------------------- */\n\n/**\n * Watch annotation\n *\n * @param el - Annotation element\n * @param container - Containing element\n *\n * @returns Annotation observable\n */\nexport function watchAnnotation(\n el: HTMLElement, container: HTMLElement\n): Observable {\n const offset$ = defer(() => combineLatest([\n watchElementOffset(el),\n watchElementContentOffset(container)\n ]))\n .pipe(\n map(([{ x, y }, scroll]) => {\n const { width } = getElementSize(el)\n return ({\n x: x - scroll.x + width / 2,\n y: y - scroll.y\n })\n })\n )\n\n /* Actively watch annotation on focus */\n return watchElementFocus(el)\n .pipe(\n switchMap(active => offset$\n .pipe(\n map(offset => ({ active, offset })),\n take(+!active || Infinity)\n )\n )\n )\n}\n\n/**\n * Mount annotation\n *\n * @param el - Annotation element\n * @param container - Containing element\n *\n * @returns Annotation component observable\n */\nexport function mountAnnotation(\n el: HTMLElement, container: HTMLElement\n): Observable> {\n return defer(() => {\n const push$ = new Subject()\n push$.subscribe({\n\n /* Handle emission */\n next({ offset }) {\n el.style.setProperty(\"--md-tooltip-x\", `${offset.x}px`)\n el.style.setProperty(\"--md-tooltip-y\", `${offset.y}px`)\n },\n\n /* Handle complete */\n complete() {\n el.style.removeProperty(\"--md-tooltip-x\")\n el.style.removeProperty(\"--md-tooltip-y\")\n }\n })\n\n /* Track relative origin of tooltip */\n push$\n .pipe(\n throttleTime(500, animationFrameScheduler),\n map(() => container.getBoundingClientRect()),\n map(({ x }) => x)\n )\n .subscribe({\n\n /* Handle emission */\n next(origin) {\n if (origin)\n el.style.setProperty(\"--md-tooltip-0\", `${-origin}px`)\n else\n el.style.removeProperty(\"--md-tooltip-0\")\n },\n\n /* Handle complete */\n complete() {\n el.style.removeProperty(\"--md-tooltip-0\")\n }\n })\n\n /* Close open annotation on click */\n const index = getElement(\":scope > 
:last-child\", el)\n const blur$ = fromEvent(index, \"mousedown\", { once: true })\n push$\n .pipe(\n switchMap(({ active }) => active ? blur$ : EMPTY),\n tap(ev => ev.preventDefault())\n )\n .subscribe(() => el.blur())\n\n /* Create and return component */\n return watchAnnotation(el, container)\n .pipe(\n tap(state => push$.next(state)),\n finalize(() => push$.complete()),\n map(state => ({ ref: el, ...state }))\n )\n })\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n EMPTY,\n Observable,\n Subject,\n defer,\n finalize,\n merge,\n share,\n takeLast,\n takeUntil\n} from \"rxjs\"\n\nimport {\n getElement,\n getElements,\n getOptionalElement\n} from \"~/browser\"\nimport { renderAnnotation } from \"~/templates\"\n\nimport { Component } from \"../../../_\"\nimport {\n Annotation,\n mountAnnotation\n} from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n print$: Observable /* Media print observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Find all annotation markers in the given code block\n *\n * @param container - Containing element\n *\n * @returns Annotation markers\n */\nfunction findAnnotationMarkers(container: HTMLElement): Text[] {\n const markers: Text[] = []\n for (const comment of getElements(\".c, .c1, .cm\", container)) {\n let match: RegExpExecArray | null\n let text = comment.firstChild as Text\n\n /* Split text at marker and add to list */\n while ((match = /\\((\\d+)\\)/.exec(text.textContent!))) {\n const marker = text.splitText(match.index)\n text = marker.splitText(match[0].length)\n markers.push(marker)\n }\n }\n return markers\n}\n\n/**\n * Swap the child nodes of two elements\n *\n * @param source - Source element\n * @param target - Target element\n */\nfunction swap(source: HTMLElement, target: HTMLElement): void {\n 
target.append(...Array.from(source.childNodes))\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount annotation list\n *\n * This function analyzes the containing code block and checks for markers\n * referring to elements in the given annotation list. If no markers are found,\n * the list is left untouched. Otherwise, list elements are rendered as\n * annotations inside the code block.\n *\n * @param el - Annotation list element\n * @param container - Containing element\n * @param options - Options\n *\n * @returns Annotation component observable\n */\nexport function mountAnnotationList(\n el: HTMLElement, container: HTMLElement, { print$ }: MountOptions\n): Observable> {\n\n /* Find and replace all markers with empty annotations */\n const annotations = new Map()\n for (const marker of findAnnotationMarkers(container)) {\n const [, id] = marker.textContent!.match(/\\((\\d+)\\)/)!\n if (getOptionalElement(`li:nth-child(${id})`, el)) {\n annotations.set(+id, renderAnnotation(+id))\n marker.replaceWith(annotations.get(+id)!)\n }\n }\n\n /* Keep list if there are no annotations to render */\n if (annotations.size === 0)\n return EMPTY\n\n /* Create and return component */\n return defer(() => {\n const done$ = new Subject()\n\n /* Handle print mode - see https://bit.ly/3rgPdpt */\n print$\n .pipe(\n takeUntil(done$.pipe(takeLast(1)))\n )\n .subscribe(active => {\n el.hidden = !active\n\n /* Show annotations in code block or list (print) */\n for (const [id, annotation] of annotations) {\n const inner = getElement(\".md-typeset\", annotation)\n const child = getElement(`li:nth-child(${id})`, el)\n if (!active)\n swap(child, inner)\n else\n swap(inner, child)\n }\n })\n\n /* Create and return component */\n return merge(...[...annotations]\n .map(([, annotation]) => (\n mountAnnotation(annotation, container)\n ))\n )\n 
.pipe(\n finalize(() => done$.complete()),\n share()\n )\n })\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n Subject,\n defer,\n filter,\n finalize,\n map,\n mapTo,\n merge,\n tap\n} from \"rxjs\"\n\nimport { Component } from \"../../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Details\n */\nexport interface Details {\n action: \"open\" | \"close\" /* Details state */\n reveal?: boolean /* Details is revealed */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n target$: Observable /* Location target observable */\n 
print$: Observable /* Media print observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n target$: Observable /* Location target observable */\n print$: Observable /* Media print observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch details\n *\n * @param el - Details element\n * @param options - Options\n *\n * @returns Details observable\n */\nexport function watchDetails(\n el: HTMLDetailsElement, { target$, print$ }: WatchOptions\n): Observable
    {\n let open = true\n return merge(\n\n /* Open and focus details on location target */\n target$\n .pipe(\n map(target => target.closest(\"details:not([open])\")!),\n filter(details => el === details),\n mapTo
    ({ action: \"open\", reveal: true })\n ),\n\n /* Open details on print and close afterwards */\n print$\n .pipe(\n filter(active => active || !open),\n tap(() => open = el.open),\n map(active => ({\n action: active ? \"open\" : \"close\"\n }) as Details)\n )\n )\n}\n\n/**\n * Mount details\n *\n * This function ensures that `details` tags are opened on anchor jumps and\n * prior to printing, so the whole content of the page is visible.\n *\n * @param el - Details element\n * @param options - Options\n *\n * @returns Details component observable\n */\nexport function mountDetails(\n el: HTMLDetailsElement, options: MountOptions\n): Observable> {\n return defer(() => {\n const push$ = new Subject
    ()\n push$.subscribe(({ action, reveal }) => {\n if (action === \"open\")\n el.setAttribute(\"open\", \"\")\n else\n el.removeAttribute(\"open\")\n if (reveal)\n el.scrollIntoView()\n })\n\n /* Create and return component */\n return watchDetails(el, options)\n .pipe(\n tap(state => push$.next(state)),\n finalize(() => push$.complete()),\n map(state => ({ ref: el, ...state }))\n )\n })\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { Observable, of } from \"rxjs\"\n\nimport { renderTable } from \"~/templates\"\nimport { h } from \"~/utilities\"\n\nimport { Component } from \"../../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Data table\n */\nexport interface DataTable {}\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Sentinel for replacement\n */\nconst sentinel = h(\"table\")\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount data table\n *\n * This function wraps a data table in another scrollable container, so it can\n * be smoothly scrolled on smaller screen sizes and won't break the layout.\n *\n * @param el - Data table element\n *\n * @returns Data table component observable\n */\nexport function mountDataTable(\n el: HTMLElement\n): Observable> {\n el.replaceWith(sentinel)\n sentinel.replaceWith(renderTable(el))\n\n /* Create and return component */\n return of({ ref: el })\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to 
permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n Subject,\n animationFrameScheduler,\n auditTime,\n combineLatest,\n defer,\n finalize,\n fromEvent,\n map,\n mapTo,\n merge,\n startWith,\n takeLast,\n takeUntil,\n tap\n} from \"rxjs\"\n\nimport {\n getElement,\n getElementOffset,\n getElementSize,\n getElements,\n watchElementSize\n} from \"~/browser\"\n\nimport { Component } from \"../../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Content tabs\n */\nexport interface ContentTabs {\n active: HTMLLabelElement /* Active tab label */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch content tabs\n *\n * @param el - Content tabs element\n *\n * @returns Content tabs observable\n */\nexport function watchContentTabs(\n el: HTMLElement\n): Observable {\n const inputs = getElements(\":scope > input\", el)\n return merge(...inputs.map(input => fromEvent(input, \"change\")\n .pipe(\n mapTo({\n active: getElement(`label[for=${input.id}]`)\n })\n )\n ))\n .pipe(\n startWith({\n 
active: getElement(`label[for=${inputs[0].id}]`)\n } as ContentTabs)\n )\n}\n\n/**\n * Mount content tabs\n *\n * This function scrolls the active tab into view. While this functionality is\n * provided by browsers as part of `scrollInfoView`, browsers will always also\n * scroll the vertical axis, which we do not want. Thus, we decided to provide\n * this functionality ourselves.\n *\n * @param el - Content tabs element\n *\n * @returns Content tabs component observable\n */\nexport function mountContentTabs(\n el: HTMLElement\n): Observable> {\n const container = getElement(\".tabbed-labels\", el)\n return defer(() => {\n const push$ = new Subject()\n combineLatest([push$, watchElementSize(el)])\n .pipe(\n auditTime(1, animationFrameScheduler),\n takeUntil(push$.pipe(takeLast(1)))\n )\n .subscribe({\n\n /* Handle emission */\n next([{ active }]) {\n const offset = getElementOffset(active)\n const { width } = getElementSize(active)\n\n /* Set tab indicator offset and width */\n el.style.setProperty(\"--md-indicator-x\", `${offset.x}px`)\n el.style.setProperty(\"--md-indicator-width\", `${width}px`)\n\n /* Smoothly scroll container */\n container.scrollTo({\n behavior: \"smooth\",\n left: offset.x\n })\n },\n\n /* Handle complete */\n complete() {\n el.style.removeProperty(\"--md-indicator-x\")\n el.style.removeProperty(\"--md-indicator-width\")\n }\n })\n\n /* Create and return component */\n return watchContentTabs(el)\n .pipe(\n tap(state => push$.next(state)),\n finalize(() => push$.complete()),\n map(state => ({ ref: el, ...state }))\n )\n })\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit 
persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { Observable, merge } from \"rxjs\"\n\nimport { getElements } from \"~/browser\"\n\nimport { Component } from \"../../_\"\nimport { Annotation } from \"../annotation\"\nimport { CodeBlock, mountCodeBlock } from \"../code\"\nimport { Details, mountDetails } from \"../details\"\nimport { DataTable, mountDataTable } from \"../table\"\nimport { ContentTabs, mountContentTabs } from \"../tabs\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Content\n */\nexport type Content =\n | Annotation\n | ContentTabs\n | CodeBlock\n | DataTable\n | Details\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n target$: Observable /* Location target observable */\n print$: Observable /* Media print observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount content\n *\n * This function 
mounts all components that are found in the content of the\n * actual article, including code blocks, data tables and details.\n *\n * @param el - Content element\n * @param options - Options\n *\n * @returns Content component observable\n */\nexport function mountContent(\n el: HTMLElement, { target$, print$ }: MountOptions\n): Observable> {\n return merge(\n\n /* Code blocks */\n ...getElements(\"pre > code\", el)\n .map(child => mountCodeBlock(child, { print$ })),\n\n /* Data tables */\n ...getElements(\"table:not([class])\", el)\n .map(child => mountDataTable(child)),\n\n /* Details */\n ...getElements(\"details\", el)\n .map(child => mountDetails(child, { target$, print$ })),\n\n /* Content tabs */\n ...getElements(\"[data-tabs]\", el)\n .map(child => mountContentTabs(child))\n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n Subject,\n defer,\n delay,\n finalize,\n map,\n merge,\n of,\n switchMap,\n tap\n} from \"rxjs\"\n\nimport { getElement } from \"~/browser\"\n\nimport { Component } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Dialog\n */\nexport interface Dialog {\n message: string /* Dialog message */\n active: boolean /* Dialog is active */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n alert$: Subject /* Alert subject */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n alert$: Subject /* Alert subject */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch dialog\n *\n * @param _el - Dialog element\n * @param options - Options\n *\n * @returns Dialog observable\n */\nexport function watchDialog(\n _el: HTMLElement, { alert$ }: WatchOptions\n): Observable {\n return alert$\n .pipe(\n switchMap(message => merge(\n of(true),\n of(false).pipe(delay(2000))\n )\n .pipe(\n map(active => ({ message, active }))\n )\n )\n )\n}\n\n/**\n * Mount dialog\n *\n * This function reveals the dialog in the right corner when a new alert is\n * emitted through the subject that is passed as part of the options.\n *\n * @param el - Dialog element\n * @param options - Options\n *\n * @returns Dialog 
component observable\n */\nexport function mountDialog(\n el: HTMLElement, options: MountOptions\n): Observable> {\n const inner = getElement(\".md-typeset\", el)\n return defer(() => {\n const push$ = new Subject()\n push$.subscribe(({ message, active }) => {\n inner.textContent = message\n if (active)\n el.setAttribute(\"data-md-state\", \"open\")\n else\n el.removeAttribute(\"data-md-state\")\n })\n\n /* Create and return component */\n return watchDialog(el, options)\n .pipe(\n tap(state => push$.next(state)),\n finalize(() => push$.complete()),\n map(state => ({ ref: el, ...state }))\n )\n })\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n Subject,\n bufferCount,\n combineLatest,\n combineLatestWith,\n defer,\n distinctUntilChanged,\n distinctUntilKeyChanged,\n filter,\n map,\n of,\n shareReplay,\n startWith,\n switchMap,\n takeLast,\n takeUntil\n} from \"rxjs\"\n\nimport { feature } from \"~/_\"\nimport {\n Viewport,\n watchElementSize,\n watchToggle\n} from \"~/browser\"\n\nimport { Component } from \"../../_\"\nimport { Main } from \"../../main\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Header\n */\nexport interface Header {\n height: number /* Header visible height */\n sticky: boolean /* Header stickyness */\n hidden: boolean /* Header is hidden */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n viewport$: Observable /* Viewport observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n viewport$: Observable /* Viewport observable */\n header$: Observable
    /* Header observable */\n main$: Observable
    /* Main area observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Compute whether the header is hidden\n *\n * If the user scrolls past a certain threshold, the header can be hidden when\n * scrolling down, and shown when scrolling up.\n *\n * @param options - Options\n *\n * @returns Toggle observable\n */\nfunction isHidden({ viewport$ }: WatchOptions): Observable {\n if (!feature(\"header.autohide\"))\n return of(false)\n\n /* Compute direction and turning point */\n const direction$ = viewport$\n .pipe(\n map(({ offset: { y } }) => y),\n bufferCount(2, 1),\n map(([a, b]) => [a < b, b] as const),\n distinctUntilKeyChanged(0)\n )\n\n /* Compute whether header should be hidden */\n const hidden$ = combineLatest([viewport$, direction$])\n .pipe(\n filter(([{ offset }, [, y]]) => Math.abs(y - offset.y) > 100),\n map(([, [direction]]) => direction),\n distinctUntilChanged()\n )\n\n /* Compute threshold for hiding */\n const search$ = watchToggle(\"search\")\n return combineLatest([viewport$, search$])\n .pipe(\n map(([{ offset }, search]) => offset.y > 400 && !search),\n distinctUntilChanged(),\n switchMap(active => active ? hidden$ : of(false)),\n startWith(false)\n )\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch header\n *\n * @param el - Header element\n * @param options - Options\n *\n * @returns Header observable\n */\nexport function watchHeader(\n el: HTMLElement, options: WatchOptions\n): Observable
    {\n return defer(() => {\n const styles = getComputedStyle(el)\n return of(\n styles.position === \"sticky\" ||\n styles.position === \"-webkit-sticky\"\n )\n })\n .pipe(\n combineLatestWith(watchElementSize(el), isHidden(options)),\n map(([sticky, { height }, hidden]) => ({\n height: sticky ? height : 0,\n sticky,\n hidden\n })),\n distinctUntilChanged((a, b) => (\n a.sticky === b.sticky &&\n a.height === b.height &&\n a.hidden === b.hidden\n )),\n shareReplay(1)\n )\n}\n\n/**\n * Mount header\n *\n * This function manages the different states of the header, i.e. whether it's\n * hidden or rendered with a shadow. This depends heavily on the main area.\n *\n * @param el - Header element\n * @param options - Options\n *\n * @returns Header component observable\n */\nexport function mountHeader(\n el: HTMLElement, { header$, main$ }: MountOptions\n): Observable> {\n return defer(() => {\n const push$ = new Subject
    ()\n push$\n .pipe(\n distinctUntilKeyChanged(\"active\"),\n combineLatestWith(header$)\n )\n .subscribe(([{ active }, { hidden }]) => {\n if (active)\n el.setAttribute(\"data-md-state\", hidden ? \"hidden\" : \"shadow\")\n else\n el.removeAttribute(\"data-md-state\")\n })\n\n /* Link to main area */\n main$.subscribe(push$)\n\n /* Create and return component */\n return header$\n .pipe(\n takeUntil(push$.pipe(takeLast(1))),\n map(state => ({ ref: el, ...state }))\n )\n })\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n EMPTY,\n Observable,\n Subject,\n defer,\n distinctUntilKeyChanged,\n finalize,\n map,\n tap\n} from \"rxjs\"\n\nimport {\n Viewport,\n getElementSize,\n getOptionalElement,\n watchViewportAt\n} from \"~/browser\"\n\nimport { Component } from \"../../_\"\nimport { Header } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Header\n */\nexport interface HeaderTitle {\n active: boolean /* Header title is active */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n viewport$: Observable /* Viewport observable */\n header$: Observable
    /* Header observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n viewport$: Observable /* Viewport observable */\n header$: Observable
    /* Header observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch header title\n *\n * @param el - Heading element\n * @param options - Options\n *\n * @returns Header title observable\n */\nexport function watchHeaderTitle(\n el: HTMLElement, { viewport$, header$ }: WatchOptions\n): Observable {\n return watchViewportAt(el, { viewport$, header$ })\n .pipe(\n map(({ offset: { y } }) => {\n const { height } = getElementSize(el)\n return {\n active: y >= height\n }\n }),\n distinctUntilKeyChanged(\"active\")\n )\n}\n\n/**\n * Mount header title\n *\n * This function swaps the header title from the site title to the title of the\n * current page when the user scrolls past the first headline.\n *\n * @param el - Header title element\n * @param options - Options\n *\n * @returns Header title component observable\n */\nexport function mountHeaderTitle(\n el: HTMLElement, options: MountOptions\n): Observable> {\n return defer(() => {\n const push$ = new Subject()\n push$.subscribe(({ active }) => {\n if (active)\n el.setAttribute(\"data-md-state\", \"active\")\n else\n el.removeAttribute(\"data-md-state\")\n })\n\n /* Obtain headline, if any */\n const heading = getOptionalElement(\"article h1\")\n if (typeof heading === \"undefined\")\n return EMPTY\n\n /* Create and return component */\n return watchHeaderTitle(heading, options)\n .pipe(\n tap(state => push$.next(state)),\n finalize(() => push$.complete()),\n map(state => ({ ref: el, ...state }))\n )\n })\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, 
sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n combineLatest,\n distinctUntilChanged,\n distinctUntilKeyChanged,\n map,\n switchMap\n} from \"rxjs\"\n\nimport {\n Viewport,\n watchElementSize\n} from \"~/browser\"\n\nimport { Header } from \"../header\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Main area\n */\nexport interface Main {\n offset: number /* Main area top offset */\n height: number /* Main area visible height */\n active: boolean /* Main area is active */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n viewport$: Observable /* Viewport observable */\n header$: Observable
    /* Header observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch main area\n *\n * This function returns an observable that computes the visual parameters of\n * the main area which depends on the viewport vertical offset and height, as\n * well as the height of the header element, if the header is fixed.\n *\n * @param el - Main area element\n * @param options - Options\n *\n * @returns Main area observable\n */\nexport function watchMain(\n el: HTMLElement, { viewport$, header$ }: WatchOptions\n): Observable
    {\n\n /* Compute necessary adjustment for header */\n const adjust$ = header$\n .pipe(\n map(({ height }) => height),\n distinctUntilChanged()\n )\n\n /* Compute the main area's top and bottom borders */\n const border$ = adjust$\n .pipe(\n switchMap(() => watchElementSize(el)\n .pipe(\n map(({ height }) => ({\n top: el.offsetTop,\n bottom: el.offsetTop + height\n })),\n distinctUntilKeyChanged(\"bottom\")\n )\n )\n )\n\n /* Compute the main area's offset, visible height and if we scrolled past */\n return combineLatest([adjust$, border$, viewport$])\n .pipe(\n map(([header, { top, bottom }, { offset: { y }, size: { height } }]) => {\n height = Math.max(0, height\n - Math.max(0, top - y, header)\n - Math.max(0, height + y - bottom)\n )\n return {\n offset: top - header,\n height,\n active: top - header <= y\n }\n }),\n distinctUntilChanged((a, b) => (\n a.offset === b.offset &&\n a.height === b.height &&\n a.active === b.active\n ))\n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n Subject,\n defer,\n finalize,\n fromEvent,\n map,\n mapTo,\n mergeMap,\n of,\n shareReplay,\n startWith,\n tap\n} from \"rxjs\"\n\nimport { getElements } from \"~/browser\"\n\nimport { Component } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Palette colors\n */\nexport interface PaletteColor {\n scheme?: string /* Color scheme */\n primary?: string /* Primary color */\n accent?: string /* Accent color */\n}\n\n/**\n * Palette\n */\nexport interface Palette {\n index: number /* Palette index */\n color: PaletteColor /* Palette colors */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch color palette\n *\n * @param inputs - Color palette element\n *\n * @returns Color palette observable\n */\nexport function watchPalette(\n inputs: HTMLInputElement[]\n): Observable {\n const current = __md_get(\"__palette\") || {\n index: inputs.findIndex(input => matchMedia(\n input.getAttribute(\"data-md-color-media\")!\n ).matches)\n }\n\n /* Emit changes in color palette */\n return of(...inputs)\n .pipe(\n mergeMap(input => fromEvent(input, \"change\")\n .pipe(\n mapTo(input)\n )\n ),\n startWith(inputs[Math.max(0, current.index)]),\n map(input => ({\n index: inputs.indexOf(input),\n color: {\n scheme: input.getAttribute(\"data-md-color-scheme\"),\n primary: input.getAttribute(\"data-md-color-primary\"),\n accent: input.getAttribute(\"data-md-color-accent\")\n }\n } as 
Palette)),\n shareReplay(1)\n )\n}\n\n/**\n * Mount color palette\n *\n * @param el - Color palette element\n *\n * @returns Color palette component observable\n */\nexport function mountPalette(\n el: HTMLElement\n): Observable> {\n return defer(() => {\n const push$ = new Subject()\n push$.subscribe(palette => {\n\n /* Set color palette */\n for (const [key, value] of Object.entries(palette.color))\n document.body.setAttribute(`data-md-color-${key}`, value)\n\n /* Toggle visibility */\n for (let index = 0; index < inputs.length; index++) {\n const label = inputs[index].nextElementSibling\n if (label instanceof HTMLElement)\n label.hidden = palette.index !== index\n }\n\n /* Persist preference in local storage */\n __md_set(\"__palette\", palette)\n })\n\n /* Create and return component */\n const inputs = getElements(\"input\", el)\n return watchPalette(inputs)\n .pipe(\n tap(state => push$.next(state)),\n finalize(() => push$.complete()),\n map(state => ({ ref: el, ...state }))\n )\n })\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport ClipboardJS from \"clipboard\"\nimport {\n Observable,\n Subject,\n mapTo,\n tap\n} from \"rxjs\"\n\nimport { translation } from \"~/_\"\nimport { getElement } from \"~/browser\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Setup options\n */\ninterface SetupOptions {\n alert$: Subject /* Alert subject */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Extract text to copy\n *\n * @param el - HTML element\n *\n * @returns Extracted text\n */\nfunction extract(el: HTMLElement): string {\n el.setAttribute(\"data-md-copying\", \"\")\n const text = el.innerText\n el.removeAttribute(\"data-md-copying\")\n return text\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Set up Clipboard.js integration\n *\n * @param options - Options\n */\nexport function setupClipboardJS(\n { alert$ }: SetupOptions\n): void {\n if (ClipboardJS.isSupported()) {\n new Observable(subscriber => {\n new ClipboardJS(\"[data-clipboard-target], [data-clipboard-text]\", {\n text: el => (\n el.getAttribute(\"data-clipboard-text\")! 
||\n extract(getElement(\n el.getAttribute(\"data-clipboard-target\")!\n ))\n )\n })\n .on(\"success\", ev => subscriber.next(ev))\n })\n .pipe(\n tap(ev => {\n const trigger = ev.trigger as HTMLElement\n trigger.focus()\n }),\n mapTo(translation(\"clipboard.copied\"))\n )\n .subscribe(alert$)\n }\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n bufferCount,\n catchError,\n concatMap,\n debounceTime,\n distinctUntilChanged,\n distinctUntilKeyChanged,\n filter,\n fromEvent,\n map,\n merge,\n of,\n sample,\n share,\n skip,\n skipUntil,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"~/_\"\nimport {\n Viewport,\n ViewportOffset,\n getElements,\n getOptionalElement,\n request,\n requestXML,\n setLocation,\n setLocationHash\n} from \"~/browser\"\nimport { getComponentElement } from \"~/components\"\nimport { h } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * History state\n */\nexport interface HistoryState {\n url: URL /* State URL */\n offset?: ViewportOffset /* State viewport offset */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Setup options\n */\ninterface SetupOptions {\n document$: Subject /* Document subject */\n location$: Subject /* Location subject */\n viewport$: Observable /* Viewport observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Preprocess a list of URLs\n *\n * This function replaces the `site_url` in the sitemap with the actual base\n * URL, to allow instant loading to work in occasions like Netlify previews.\n *\n * @param urls - URLs\n *\n * @returns Processed URLs\n 
*/\nfunction preprocess(urls: string[]): string[] {\n if (urls.length < 2)\n return urls\n\n /* Take the first two URLs and remove everything after the last slash */\n const [root, next] = urls\n .sort((a, b) => a.length - b.length)\n .map(url => url.replace(/[^/]+$/, \"\"))\n\n /* Compute common prefix */\n let index = 0\n if (root === next)\n index = root.length\n else\n while (root.charCodeAt(index) === next.charCodeAt(index))\n index++\n\n /* Replace common prefix (i.e. base) with effective base */\n const config = configuration()\n return urls.map(url => (\n url.replace(root.slice(0, index), config.base)\n ))\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Set up instant loading\n *\n * When fetching, theoretically, we could use `responseType: \"document\"`, but\n * since all MkDocs links are relative, we need to make sure that the current\n * location matches the document we just loaded. Otherwise any relative links\n * in the document could use the old location.\n *\n * This is the reason why we need to synchronize history events and the process\n * of fetching the document for navigation changes (except `popstate` events):\n *\n * 1. Fetch document via `XMLHTTPRequest`\n * 2. Set new location via `history.pushState`\n * 3. Parse and emit fetched document\n *\n * For `popstate` events, we must not use `history.pushState`, or the forward\n * history will be irreversibly overwritten. 
In case the request fails, the\n * location change is dispatched regularly.\n *\n * @param options - Options\n */\nexport function setupInstantLoading(\n { document$, location$, viewport$ }: SetupOptions\n): void {\n const config = configuration()\n if (location.protocol === \"file:\")\n return\n\n /* Disable automatic scroll restoration */\n if (\"scrollRestoration\" in history) {\n history.scrollRestoration = \"manual\"\n\n /* Hack: ensure that reloads restore viewport offset */\n fromEvent(window, \"beforeunload\")\n .subscribe(() => {\n history.scrollRestoration = \"auto\"\n })\n }\n\n /* Hack: ensure absolute favicon link to omit 404s when switching */\n const favicon = getOptionalElement(\"link[rel=icon]\")\n if (typeof favicon !== \"undefined\")\n favicon.href = favicon.href\n\n /* Intercept internal navigation */\n const push$ = requestXML(new URL(\"sitemap.xml\", config.base))\n .pipe(\n map(sitemap => preprocess(getElements(\"loc\", sitemap)\n .map(node => node.textContent!)\n )),\n switchMap(urls => fromEvent(document.body, \"click\")\n .pipe(\n filter(ev => !ev.metaKey && !ev.ctrlKey),\n switchMap(ev => {\n\n /* Handle HTML and SVG elements */\n if (ev.target instanceof Element) {\n const el = ev.target.closest(\"a\")\n if (el && !el.target) {\n const url = new URL(el.href)\n\n /* Canonicalize URL */\n url.search = \"\"\n url.hash = \"\"\n\n /* Check if URL should be intercepted */\n if (\n url.pathname !== location.pathname &&\n urls.includes(url.toString())\n ) {\n ev.preventDefault()\n return of({\n url: new URL(el.href)\n })\n }\n }\n }\n return NEVER\n })\n )\n ),\n share()\n )\n\n /* Intercept history back and forward */\n const pop$ = fromEvent(window, \"popstate\")\n .pipe(\n filter(ev => ev.state !== null),\n map(ev => ({\n url: new URL(location.href),\n offset: ev.state\n })),\n share()\n )\n\n /* Emit location change */\n merge(push$, pop$)\n .pipe(\n distinctUntilChanged((a, b) => a.url.href === b.url.href),\n map(({ url }) => url)\n )\n 
.subscribe(location$)\n\n /* Fetch document via `XMLHTTPRequest` */\n const response$ = location$\n .pipe(\n distinctUntilKeyChanged(\"pathname\"),\n switchMap(url => request(url.href)\n .pipe(\n catchError(() => {\n setLocation(url)\n return NEVER\n })\n )\n ),\n share()\n )\n\n /* Set new location via `history.pushState` */\n push$\n .pipe(\n sample(response$)\n )\n .subscribe(({ url }) => {\n history.pushState({}, \"\", `${url}`)\n })\n\n /* Parse and emit fetched document */\n const dom = new DOMParser()\n response$\n .pipe(\n switchMap(res => res.text()),\n map(res => dom.parseFromString(res, \"text/html\"))\n )\n .subscribe(document$)\n\n /* Replace meta tags and components */\n document$\n .pipe(\n skip(1)\n )\n .subscribe(replacement => {\n for (const selector of [\n\n /* Meta tags */\n \"title\",\n \"link[rel=canonical]\",\n \"meta[name=author]\",\n \"meta[name=description]\",\n\n /* Components */\n \"[data-md-component=announce]\",\n \"[data-md-component=container]\",\n \"[data-md-component=header-topic]\",\n \"[data-md-component=logo]\",\n \"[data-md-component=skip]\",\n ...feature(\"navigation.tabs.sticky\")\n ? 
[\"[data-md-component=tabs]\"]\n : []\n ]) {\n const source = getOptionalElement(selector)\n const target = getOptionalElement(selector, replacement)\n if (\n typeof source !== \"undefined\" &&\n typeof target !== \"undefined\"\n ) {\n source.replaceWith(target)\n }\n }\n })\n\n /* Re-evaluate scripts */\n document$\n .pipe(\n skip(1),\n map(() => getComponentElement(\"container\")),\n switchMap(el => of(...getElements(\"script\", el))),\n concatMap(el => {\n const script = h(\"script\")\n if (el.src) {\n for (const name of el.getAttributeNames())\n script.setAttribute(name, el.getAttribute(name)!)\n el.replaceWith(script)\n\n /* Complete when script is loaded */\n return new Observable(observer => {\n script.onload = () => observer.complete()\n })\n\n /* Complete immediately */\n } else {\n script.textContent = el.textContent\n el.replaceWith(script)\n return EMPTY\n }\n })\n )\n .subscribe()\n\n /* Emit history state change */\n merge(push$, pop$)\n .pipe(\n sample(document$)\n )\n .subscribe(({ url, offset }) => {\n if (url.hash && !offset) {\n setLocationHash(url.hash)\n } else {\n window.scrollTo(0, offset?.y || 0)\n }\n })\n\n /* Debounce update of viewport offset */\n viewport$\n .pipe(\n skipUntil(push$),\n debounceTime(250),\n distinctUntilKeyChanged(\"offset\")\n )\n .subscribe(({ offset }) => {\n history.replaceState(offset, \"\")\n })\n\n /* Set viewport offset from history */\n merge(push$, pop$)\n .pipe(\n bufferCount(2, 1),\n filter(([a, b]) => a.url.pathname === b.url.pathname),\n map(([, state]) => state)\n )\n .subscribe(({ offset }) => {\n window.scrollTo(0, offset?.y || 0)\n })\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, 
and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport escapeHTML from \"escape-html\"\n\nimport { SearchIndexDocument } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search document\n */\nexport interface SearchDocument extends SearchIndexDocument {\n parent?: SearchIndexDocument /* Parent article */\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Search document mapping\n */\nexport type SearchDocumentMap = Map\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Create a search document mapping\n *\n * @param docs - Search index documents\n *\n * @returns Search document map\n */\nexport function setupSearchDocumentMap(\n docs: SearchIndexDocument[]\n): SearchDocumentMap {\n const documents = new Map()\n const parents = new Set()\n for (const doc of docs) {\n const [path, hash] = doc.location.split(\"#\")\n\n /* Extract location and title */\n const location = doc.location\n const title = doc.title\n\n 
/* Escape and cleanup text */\n const text = escapeHTML(doc.text)\n .replace(/\\s+(?=[,.:;!?])/g, \"\")\n .replace(/\\s+/g, \" \")\n\n /* Handle section */\n if (hash) {\n const parent = documents.get(path)!\n\n /* Ignore first section, override article */\n if (!parents.has(parent)) {\n parent.title = doc.title\n parent.text = text\n\n /* Remember that we processed the article */\n parents.add(parent)\n\n /* Add subsequent section */\n } else {\n documents.set(location, {\n location,\n title,\n text,\n parent\n })\n }\n\n /* Add article */\n } else {\n documents.set(location, {\n location,\n title,\n text\n })\n }\n }\n return documents\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport escapeHTML from \"escape-html\"\n\nimport { SearchIndexConfig } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search highlight function\n *\n * @param value - Value\n *\n * @returns Highlighted value\n */\nexport type SearchHighlightFn = (value: string) => string\n\n/**\n * Search highlight factory function\n *\n * @param query - Query value\n *\n * @returns Search highlight function\n */\nexport type SearchHighlightFactoryFn = (query: string) => SearchHighlightFn\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Create a search highlighter\n *\n * @param config - Search index configuration\n * @param escape - Whether to escape HTML\n *\n * @returns Search highlight factory function\n */\nexport function setupSearchHighlighter(\n config: SearchIndexConfig, escape: boolean\n): SearchHighlightFactoryFn {\n const separator = new RegExp(config.separator, \"img\")\n const highlight = (_: unknown, data: string, term: string) => {\n return `${data}${term}`\n }\n\n /* Return factory function */\n return (query: string) => {\n query = query\n .replace(/[\\s*+\\-:~^]+/g, \" \")\n .trim()\n\n /* Create search term match expression */\n const match = new RegExp(`(^|${config.separator})(${\n query\n .replace(/[|\\\\{}()[\\]^$+*?.-]/g, \"\\\\$&\")\n .replace(separator, \"|\")\n })`, \"img\")\n\n /* Highlight string value */\n return value => (\n escape\n ? 
escapeHTML(value)\n : value\n )\n .replace(match, highlight)\n .replace(/<\\/mark>(\\s+)]*>/img, \"$1\")\n }\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search transformation function\n *\n * @param value - Query value\n *\n * @returns Transformed query value\n */\nexport type SearchTransformFn = (value: string) => string\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Default transformation function\n *\n * 1. 
Search for terms in quotation marks and prepend a `+` modifier to denote\n * that the resulting document must contain all terms, converting the query\n * to an `AND` query (as opposed to the default `OR` behavior). While users\n * may expect terms enclosed in quotation marks to map to span queries, i.e.\n * for which order is important, Lunr.js doesn't support them, so the best\n * we can do is to convert the terms to an `AND` query.\n *\n * 2. Replace control characters which are not located at the beginning of the\n * query or preceded by white space, or are not followed by a non-whitespace\n * character or are at the end of the query string. Furthermore, filter\n * unmatched quotation marks.\n *\n * 3. Trim excess whitespace from left and right.\n *\n * @param query - Query value\n *\n * @returns Transformed query value\n */\nexport function defaultTransform(query: string): string {\n return query\n .split(/\"([^\"]+)\"/g) /* => 1 */\n .map((terms, index) => index & 1\n ? terms.replace(/^\\b|^(?![^\\x00-\\x7F]|$)|\\s+/g, \" +\")\n : terms\n )\n .join(\"\")\n .replace(/\"|(?:^|\\s+)[*+\\-:^~]+(?=\\s+|$)/g, \"\") /* => 2 */\n .trim() /* => 3 */\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A 
RTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { SearchIndex, SearchResult } from \"../../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search message type\n */\nexport const enum SearchMessageType {\n SETUP, /* Search index setup */\n READY, /* Search index ready */\n QUERY, /* Search query */\n RESULT /* Search results */\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Message containing the data necessary to setup the search index\n */\nexport interface SearchSetupMessage {\n type: SearchMessageType.SETUP /* Message type */\n data: SearchIndex /* Message data */\n}\n\n/**\n * Message indicating the search index is ready\n */\nexport interface SearchReadyMessage {\n type: SearchMessageType.READY /* Message type */\n}\n\n/**\n * Message containing a search query\n */\nexport interface SearchQueryMessage {\n type: SearchMessageType.QUERY /* Message type */\n data: string /* Message data */\n}\n\n/**\n * Message containing results for a search query\n */\nexport interface SearchResultMessage {\n type: SearchMessageType.RESULT /* Message type */\n data: SearchResult /* Message data */\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Message exchanged with the search worker\n */\nexport type SearchMessage =\n | SearchSetupMessage\n | SearchReadyMessage\n | SearchQueryMessage\n | SearchResultMessage\n\n/* ----------------------------------------------------------------------------\n * Functions\n * 
------------------------------------------------------------------------- */\n\n/**\n * Type guard for search setup messages\n *\n * @param message - Search worker message\n *\n * @returns Test result\n */\nexport function isSearchSetupMessage(\n message: SearchMessage\n): message is SearchSetupMessage {\n return message.type === SearchMessageType.SETUP\n}\n\n/**\n * Type guard for search ready messages\n *\n * @param message - Search worker message\n *\n * @returns Test result\n */\nexport function isSearchReadyMessage(\n message: SearchMessage\n): message is SearchReadyMessage {\n return message.type === SearchMessageType.READY\n}\n\n/**\n * Type guard for search query messages\n *\n * @param message - Search worker message\n *\n * @returns Test result\n */\nexport function isSearchQueryMessage(\n message: SearchMessage\n): message is SearchQueryMessage {\n return message.type === SearchMessageType.QUERY\n}\n\n/**\n * Type guard for search result messages\n *\n * @param message - Search worker message\n *\n * @returns Test result\n */\nexport function isSearchResultMessage(\n message: SearchMessage\n): message is SearchResultMessage {\n return message.type === SearchMessageType.RESULT\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE 
WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A RTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n ObservableInput,\n Subject,\n from,\n map,\n share\n} from \"rxjs\"\n\nimport { configuration, feature, translation } from \"~/_\"\nimport { WorkerHandler, watchWorker } from \"~/browser\"\n\nimport { SearchIndex } from \"../../_\"\nimport {\n SearchOptions,\n SearchPipeline\n} from \"../../options\"\nimport {\n SearchMessage,\n SearchMessageType,\n SearchSetupMessage,\n isSearchResultMessage\n} from \"../message\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search worker\n */\nexport type SearchWorker = WorkerHandler\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Set up search index\n *\n * @param data - Search index\n *\n * @returns Search index\n */\nfunction setupSearchIndex({ config, docs }: SearchIndex): SearchIndex {\n\n /* Override default language with value from translation */\n if (config.lang.length === 1 && config.lang[0] === \"en\")\n config.lang = [\n translation(\"search.config.lang\")\n ]\n\n /* Override default separator with value from translation */\n if (config.separator === \"[\\\\s\\\\-]+\")\n config.separator = translation(\"search.config.separator\")\n\n /* Set pipeline from translation */\n const pipeline = translation(\"search.config.pipeline\")\n .split(/\\s*,\\s*/)\n .filter(Boolean) as SearchPipeline\n\n /* Determine search options */\n const options: SearchOptions = {\n 
pipeline,\n suggestions: feature(\"search.suggest\")\n }\n\n /* Return search index after defaulting */\n return { config, docs, options }\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Set up search worker\n *\n * This function creates a web worker to set up and query the search index,\n * which is done using Lunr.js. The index must be passed as an observable to\n * enable hacks like _localsearch_ via search index embedding as JSON.\n *\n * @param url - Worker URL\n * @param index - Search index observable input\n *\n * @returns Search worker\n */\nexport function setupSearchWorker(\n url: string, index: ObservableInput\n): SearchWorker {\n const config = configuration()\n const worker = new Worker(url)\n\n /* Create communication channels and resolve relative links */\n const tx$ = new Subject()\n const rx$ = watchWorker(worker, { tx$ })\n .pipe(\n map(message => {\n if (isSearchResultMessage(message)) {\n for (const result of message.data.items)\n for (const document of result)\n document.location = `${new URL(document.location, config.base)}`\n }\n return message\n }),\n share()\n )\n\n /* Set up search index */\n from(index)\n .pipe(\n map(data => ({\n type: SearchMessageType.SETUP,\n data: setupSearchIndex(data)\n } as SearchSetupMessage))\n )\n .subscribe(tx$.next.bind(tx$))\n\n /* Return search worker */\n return { tx$, rx$ }\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the 
following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { combineLatest, map } from \"rxjs\"\n\nimport { configuration } from \"~/_\"\nimport {\n getElement,\n requestJSON\n} from \"~/browser\"\nimport { getComponentElements } from \"~/components\"\nimport {\n Version,\n renderVersionSelector\n} from \"~/templates\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Set up version selector\n */\nexport function setupVersionSelector(): void {\n const config = configuration()\n const versions$ = requestJSON(\n new URL(\"../versions.json\", config.base)\n )\n\n /* Determine current version */\n const current$ = versions$\n .pipe(\n map(versions => {\n const [, current] = config.base.match(/([^/]+)\\/?$/)!\n return versions.find(({ version, aliases }) => (\n version === current || aliases.includes(current)\n )) || versions[0]\n })\n )\n\n /* Render version selector and warning */\n combineLatest([versions$, current$])\n .subscribe(([versions, current]) => {\n const topic = getElement(\".md-header__topic\")\n topic.appendChild(renderVersionSelector(versions, current))\n\n /* Check if version state was already determined */\n if (__md_get(\"__outdated\", sessionStorage) === null) {\n const latest = config.version?.default || 
\"latest\"\n const outdated = !current.aliases.includes(latest)\n\n /* Persist version state in session storage */\n __md_set(\"__outdated\", outdated, sessionStorage)\n if (outdated)\n for (const warning of getComponentElements(\"outdated\"))\n warning.hidden = false\n }\n })\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n Subject,\n combineLatest,\n delay,\n distinctUntilChanged,\n distinctUntilKeyChanged,\n filter,\n finalize,\n fromEvent,\n map,\n merge,\n shareReplay,\n startWith,\n take,\n takeLast,\n takeUntil,\n tap\n} from \"rxjs\"\n\nimport { translation } from \"~/_\"\nimport {\n getLocation,\n setToggle,\n watchElementFocus\n} from \"~/browser\"\nimport {\n SearchMessageType,\n SearchQueryMessage,\n SearchWorker,\n defaultTransform,\n isSearchReadyMessage\n} from \"~/integrations\"\n\nimport { Component } from \"../../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search query\n */\nexport interface SearchQuery {\n value: string /* Query value */\n focus: boolean /* Query focus */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch search query\n *\n * Note that the focus event which triggers re-reading the current query value\n * is delayed by `1ms` so the input's empty state is allowed to propagate.\n *\n * @param el - Search query element\n * @param worker - Search worker\n *\n * @returns Search query observable\n */\nexport function watchSearchQuery(\n el: HTMLInputElement, { rx$ }: SearchWorker\n): Observable {\n const fn = __search?.transform || defaultTransform\n\n /* Immediately show search dialog */\n const { searchParams } = getLocation()\n if (searchParams.has(\"q\"))\n setToggle(\"search\", true)\n\n /* Intercept query parameter (deep link) */\n const param$ = rx$\n .pipe(\n 
filter(isSearchReadyMessage),\n take(1),\n map(() => searchParams.get(\"q\") || \"\")\n )\n\n /* Set query from parameter */\n param$.subscribe(value => { // TODO: not ideal - find a better way\n if (value)\n el.value = value\n })\n\n /* Intercept focus and input events */\n const focus$ = watchElementFocus(el)\n const value$ = merge(\n fromEvent(el, \"keyup\"),\n fromEvent(el, \"focus\").pipe(delay(1)),\n param$\n )\n .pipe(\n map(() => fn(el.value)),\n startWith(\"\"),\n distinctUntilChanged(),\n )\n\n /* Combine into single observable */\n return combineLatest([value$, focus$])\n .pipe(\n map(([value, focus]) => ({ value, focus })),\n shareReplay(1)\n )\n}\n\n/**\n * Mount search query\n *\n * @param el - Search query element\n * @param worker - Search worker\n *\n * @returns Search query component observable\n */\nexport function mountSearchQuery(\n el: HTMLInputElement, { tx$, rx$ }: SearchWorker\n): Observable> {\n const push$ = new Subject()\n\n /* Handle value changes */\n push$\n .pipe(\n distinctUntilKeyChanged(\"value\"),\n map(({ value }): SearchQueryMessage => ({\n type: SearchMessageType.QUERY,\n data: value\n }))\n )\n .subscribe(tx$.next.bind(tx$))\n\n /* Handle focus changes */\n push$\n .pipe(\n distinctUntilKeyChanged(\"focus\")\n )\n .subscribe(({ focus }) => {\n if (focus) {\n setToggle(\"search\", focus)\n el.placeholder = \"\"\n } else {\n el.placeholder = translation(\"search.placeholder\")\n }\n })\n\n /* Handle reset */\n fromEvent(el.form!, \"reset\")\n .pipe(\n takeUntil(push$.pipe(takeLast(1)))\n )\n .subscribe(() => el.focus())\n\n /* Create and return component */\n return watchSearchQuery(el, { tx$, rx$ })\n .pipe(\n tap(state => push$.next(state)),\n finalize(() => push$.complete()),\n map(state => ({ ref: el, ...state }))\n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the 
\"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n Subject,\n bufferCount,\n filter,\n finalize,\n map,\n merge,\n of,\n skipUntil,\n switchMap,\n take,\n tap,\n withLatestFrom,\n zipWith\n} from \"rxjs\"\n\nimport { translation } from \"~/_\"\nimport {\n getElement,\n watchElementBoundary\n} from \"~/browser\"\nimport {\n SearchResult,\n SearchWorker,\n isSearchReadyMessage,\n isSearchResultMessage\n} from \"~/integrations\"\nimport { renderSearchResultItem } from \"~/templates\"\nimport { round } from \"~/utilities\"\n\nimport { Component } from \"../../_\"\nimport { SearchQuery } from \"../query\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n query$: Observable /* Search query observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * 
------------------------------------------------------------------------- */\n\n/**\n * Mount search result list\n *\n * This function performs a lazy rendering of the search results, depending on\n * the vertical offset of the search result container.\n *\n * @param el - Search result list element\n * @param worker - Search worker\n * @param options - Options\n *\n * @returns Search result list component observable\n */\nexport function mountSearchResult(\n el: HTMLElement, { rx$ }: SearchWorker, { query$ }: MountOptions\n): Observable> {\n const push$ = new Subject()\n const boundary$ = watchElementBoundary(el.parentElement!)\n .pipe(\n filter(Boolean)\n )\n\n /* Retrieve nested components */\n const meta = getElement(\":scope > :first-child\", el)\n const list = getElement(\":scope > :last-child\", el)\n\n /* Wait until search is ready */\n const ready$ = rx$\n .pipe(\n filter(isSearchReadyMessage),\n take(1)\n )\n\n /* Update search result metadata */\n push$\n .pipe(\n withLatestFrom(query$),\n skipUntil(ready$)\n )\n .subscribe(([{ items }, { value }]) => {\n if (value) {\n switch (items.length) {\n\n /* No results */\n case 0:\n meta.textContent = translation(\"search.result.none\")\n break\n\n /* One result */\n case 1:\n meta.textContent = translation(\"search.result.one\")\n break\n\n /* Multiple result */\n default:\n meta.textContent = translation(\n \"search.result.other\",\n round(items.length)\n )\n }\n } else {\n meta.textContent = translation(\"search.result.placeholder\")\n }\n })\n\n /* Update search result list */\n push$\n .pipe(\n tap(() => list.innerHTML = \"\"),\n switchMap(({ items }) => merge(\n of(...items.slice(0, 10)),\n of(...items.slice(10))\n .pipe(\n bufferCount(4),\n zipWith(boundary$),\n switchMap(([chunk]) => of(...chunk))\n )\n ))\n )\n .subscribe(result => list.appendChild(\n renderSearchResultItem(result)\n ))\n\n /* Filter search result message */\n const result$ = rx$\n .pipe(\n filter(isSearchResultMessage),\n map(({ data 
}) => data)\n )\n\n /* Create and return component */\n return result$\n .pipe(\n tap(state => push$.next(state)),\n finalize(() => push$.complete()),\n map(state => ({ ref: el, ...state }))\n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n Subject,\n finalize,\n fromEvent,\n map,\n tap\n} from \"rxjs\"\n\nimport { getLocation } from \"~/browser\"\n\nimport { Component } from \"../../_\"\nimport { SearchQuery } from \"../query\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search sharing\n */\nexport interface SearchShare {\n url: URL /* Deep link for sharing */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n query$: Observable /* Search query observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n query$: Observable /* Search query observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount search sharing\n *\n * @param _el - Search sharing element\n * @param options - Options\n *\n * @returns Search sharing observable\n */\nexport function watchSearchShare(\n _el: HTMLElement, { query$ }: WatchOptions\n): Observable {\n return query$\n .pipe(\n map(({ value }) => {\n const url = getLocation()\n url.hash = \"\"\n url.searchParams.delete(\"h\")\n url.searchParams.set(\"q\", value)\n return { url }\n })\n )\n}\n\n/**\n * Mount search sharing\n *\n * @param el - Search sharing element\n * @param options - Options\n *\n * @returns Search sharing component observable\n */\nexport function mountSearchShare(\n 
el: HTMLAnchorElement, options: MountOptions\n): Observable> {\n const push$ = new Subject()\n push$.subscribe(({ url }) => {\n el.setAttribute(\"data-clipboard-text\", el.href)\n el.href = `${url}`\n })\n\n /* Prevent following of link */\n fromEvent(el, \"click\")\n .subscribe(ev => ev.preventDefault())\n\n /* Create and return component */\n return watchSearchShare(el, options)\n .pipe(\n tap(state => push$.next(state)),\n finalize(() => push$.complete()),\n map(state => ({ ref: el, ...state }))\n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n Subject,\n asyncScheduler,\n combineLatestWith,\n distinctUntilChanged,\n filter,\n finalize,\n fromEvent,\n map,\n merge,\n observeOn,\n tap\n} from \"rxjs\"\n\nimport { Keyboard } from \"~/browser\"\nimport {\n SearchResult,\n SearchWorker,\n isSearchResultMessage\n} from \"~/integrations\"\n\nimport { Component, getComponentElement } from \"../../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search suggestions\n */\nexport interface SearchSuggest {}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n keyboard$: Observable /* Keyboard observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount search suggestions\n *\n * This function will perform a lazy rendering of the search results, depending\n * on the vertical offset of the search result container.\n *\n * @param el - Search result list element\n * @param worker - Search worker\n * @param options - Options\n *\n * @returns Search result list component observable\n */\nexport function mountSearchSuggest(\n el: HTMLElement, { rx$ }: SearchWorker, { keyboard$ }: MountOptions\n): Observable> {\n const push$ = new Subject()\n\n /* Retrieve query component and track all changes */\n const query = getComponentElement(\"search-query\")\n 
const query$ = merge(\n fromEvent(query, \"keydown\"),\n fromEvent(query, \"focus\")\n )\n .pipe(\n observeOn(asyncScheduler),\n map(() => query.value),\n distinctUntilChanged(),\n )\n\n /* Update search suggestions */\n push$\n .pipe(\n combineLatestWith(query$),\n map(([{ suggestions }, value]) => {\n const words = value.split(/([\\s-]+)/)\n if (suggestions?.length && words[words.length - 1]) {\n const last = suggestions[suggestions.length - 1]\n if (last.startsWith(words[words.length - 1]))\n words[words.length - 1] = last\n } else {\n words.length = 0\n }\n return words\n })\n )\n .subscribe(words => el.innerHTML = words\n .join(\"\")\n .replace(/\\s/g, \" \")\n )\n\n /* Set up search keyboard handlers */\n keyboard$\n .pipe(\n filter(({ mode }) => mode === \"search\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Right arrow: accept current suggestion */\n case \"ArrowRight\":\n if (\n el.innerText.length &&\n query.selectionStart === query.value.length\n )\n query.value = el.innerText\n break\n }\n })\n\n /* Filter search result message */\n const result$ = rx$\n .pipe(\n filter(isSearchResultMessage),\n map(({ data }) => data)\n )\n\n /* Create and return component */\n return result$\n .pipe(\n tap(state => push$.next(state)),\n finalize(() => push$.complete()),\n map(() => ({ ref: el }))\n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE 
SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n NEVER,\n Observable,\n ObservableInput,\n filter,\n merge,\n mergeWith,\n sample,\n take\n} from \"rxjs\"\n\nimport { configuration } from \"~/_\"\nimport {\n Keyboard,\n getActiveElement,\n getElements,\n setToggle\n} from \"~/browser\"\nimport {\n SearchIndex,\n SearchResult,\n isSearchQueryMessage,\n isSearchReadyMessage,\n setupSearchWorker\n} from \"~/integrations\"\n\nimport {\n Component,\n getComponentElement,\n getComponentElements\n} from \"../../_\"\nimport {\n SearchQuery,\n mountSearchQuery\n} from \"../query\"\nimport { mountSearchResult } from \"../result\"\nimport {\n SearchShare,\n mountSearchShare\n} from \"../share\"\nimport {\n SearchSuggest,\n mountSearchSuggest\n} from \"../suggest\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search\n */\nexport type Search =\n | SearchQuery\n | SearchResult\n | SearchShare\n | SearchSuggest\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n index$: ObservableInput /* Search index observable */\n keyboard$: Observable /* Keyboard observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * 
------------------------------------------------------------------------- */\n\n/**\n * Mount search\n *\n * This function sets up the search functionality, including the underlying\n * web worker and all keyboard bindings.\n *\n * @param el - Search element\n * @param options - Options\n *\n * @returns Search component observable\n */\nexport function mountSearch(\n el: HTMLElement, { index$, keyboard$ }: MountOptions\n): Observable> {\n const config = configuration()\n try {\n const url = __search?.worker || config.search\n const worker = setupSearchWorker(url, index$)\n\n /* Retrieve query and result components */\n const query = getComponentElement(\"search-query\", el)\n const result = getComponentElement(\"search-result\", el)\n\n /* Re-emit query when search is ready */\n const { tx$, rx$ } = worker\n tx$\n .pipe(\n filter(isSearchQueryMessage),\n sample(rx$.pipe(filter(isSearchReadyMessage))),\n take(1)\n )\n .subscribe(tx$.next.bind(tx$))\n\n /* Set up search keyboard handlers */\n keyboard$\n .pipe(\n filter(({ mode }) => mode === \"search\")\n )\n .subscribe(key => {\n const active = getActiveElement()\n switch (key.type) {\n\n /* Enter: go to first (best) result */\n case \"Enter\":\n if (active === query) {\n const anchors = new Map()\n for (const anchor of getElements(\n \":first-child [href]\", result\n )) {\n const article = anchor.firstElementChild!\n anchors.set(anchor, parseFloat(\n article.getAttribute(\"data-md-score\")!\n ))\n }\n\n /* Go to result with highest score, if any */\n if (anchors.size) {\n const [[best]] = [...anchors].sort(([, a], [, b]) => b - a)\n best.click()\n }\n\n /* Otherwise omit form submission */\n key.claim()\n }\n break\n\n /* Escape or Tab: close search */\n case \"Escape\":\n case \"Tab\":\n setToggle(\"search\", false)\n query.blur()\n break\n\n /* Vertical arrows: select previous or next search result */\n case \"ArrowUp\":\n case \"ArrowDown\":\n if (typeof active === \"undefined\") {\n query.focus()\n } else {\n 
const els = [query, ...getElements(\n \":not(details) > [href], summary, details[open] [href]\",\n result\n )]\n const i = Math.max(0, (\n Math.max(0, els.indexOf(active)) + els.length + (\n key.type === \"ArrowUp\" ? -1 : +1\n )\n ) % els.length)\n els[i].focus()\n }\n\n /* Prevent scrolling of page */\n key.claim()\n break\n\n /* All other keys: hand to search query */\n default:\n if (query !== getActiveElement())\n query.focus()\n }\n })\n\n /* Set up global keyboard handlers */\n keyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\"),\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Open search and select query */\n case \"f\":\n case \"s\":\n case \"/\":\n query.focus()\n query.select()\n\n /* Prevent scrolling of page */\n key.claim()\n break\n }\n })\n\n /* Create and return component */\n const query$ = mountSearchQuery(query, worker)\n const result$ = mountSearchResult(result, worker, { query$ })\n return merge(query$, result$)\n .pipe(\n mergeWith(\n\n /* Search sharing */\n ...getComponentElements(\"search-share\", el)\n .map(child => mountSearchShare(child, { query$ })),\n\n /* Search suggestions */\n ...getComponentElements(\"search-suggest\", el)\n .map(child => mountSearchSuggest(child, worker, { keyboard$ }))\n )\n )\n\n /* Gracefully handle broken search */\n } catch (err) {\n el.hidden = true\n return NEVER\n }\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial 
portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n ObservableInput,\n combineLatest,\n filter,\n map,\n startWith\n} from \"rxjs\"\n\nimport { getLocation } from \"~/browser\"\nimport {\n SearchIndex,\n setupSearchHighlighter\n} from \"~/integrations\"\nimport { h } from \"~/utilities\"\n\nimport { Component } from \"../../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search highlighting\n */\nexport interface SearchHighlight {\n nodes: Map /* Map of replacements */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n index$: ObservableInput /* Search index observable */\n location$: Observable /* Location observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount search highlighting\n *\n * @param el - Content element\n * @param options - Options\n *\n * @returns Search highlighting component observable\n */\nexport function mountSearchHiglight(\n el: HTMLElement, { index$, location$ }: MountOptions\n): Observable> {\n return combineLatest([\n index$,\n location$\n .pipe(\n 
startWith(getLocation()),\n filter(url => !!url.searchParams.get(\"h\"))\n )\n ])\n .pipe(\n map(([index, url]) => setupSearchHighlighter(index.config, true)(\n url.searchParams.get(\"h\")!\n )),\n map(fn => {\n const nodes = new Map()\n\n /* Traverse text nodes and collect matches */\n const it = document.createNodeIterator(el, NodeFilter.SHOW_TEXT)\n for (let node = it.nextNode(); node; node = it.nextNode()) {\n if (node.parentElement?.offsetHeight) {\n const original = node.textContent!\n const replaced = fn(original)\n if (replaced.length > original.length)\n nodes.set(node as ChildNode, replaced)\n }\n }\n\n /* Replace original nodes with matches */\n for (const [node, text] of nodes) {\n const { childNodes } = h(\"span\", null, text)\n node.replaceWith(...Array.from(childNodes))\n }\n\n /* Return component */\n return { ref: el, nodes }\n })\n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n Subject,\n animationFrameScheduler,\n auditTime,\n combineLatest,\n defer,\n distinctUntilChanged,\n finalize,\n map,\n tap,\n withLatestFrom\n} from \"rxjs\"\n\nimport {\n Viewport,\n getElement,\n getElementOffset\n} from \"~/browser\"\n\nimport { Component } from \"../_\"\nimport { Header } from \"../header\"\nimport { Main } from \"../main\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Sidebar\n */\nexport interface Sidebar {\n height: number /* Sidebar height */\n locked: boolean /* Sidebar is locked */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n viewport$: Observable /* Viewport observable */\n main$: Observable
    /* Main area observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n viewport$: Observable /* Viewport observable */\n header$: Observable
    /* Header observable */\n main$: Observable
    /* Main area observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch sidebar\n *\n * This function returns an observable that computes the visual parameters of\n * the sidebar which depends on the vertical viewport offset, as well as the\n * height of the main area. When the page is scrolled beyond the header, the\n * sidebar is locked and fills the remaining space.\n *\n * @param el - Sidebar element\n * @param options - Options\n *\n * @returns Sidebar observable\n */\nexport function watchSidebar(\n el: HTMLElement, { viewport$, main$ }: WatchOptions\n): Observable {\n const parent = el.parentElement!\n const adjust =\n parent.offsetTop -\n parent.parentElement!.offsetTop\n\n /* Compute the sidebar's available height and if it should be locked */\n return combineLatest([main$, viewport$])\n .pipe(\n map(([{ offset, height }, { offset: { y } }]) => {\n height = height\n + Math.min(adjust, Math.max(0, y - offset))\n - adjust\n return {\n height,\n locked: y >= offset + adjust\n }\n }),\n distinctUntilChanged((a, b) => (\n a.height === b.height &&\n a.locked === b.locked\n ))\n )\n}\n\n/**\n * Mount sidebar\n *\n * This function doesn't set the height of the actual sidebar, but of its first\n * child \u2013 the `.md-sidebar__scrollwrap` element in order to mitigiate jittery\n * sidebars when the footer is scrolled into view. At some point we switched\n * from `absolute` / `fixed` positioning to `sticky` positioning, significantly\n * reducing jitter in some browsers (respectively Firefox and Safari) when\n * scrolling from the top. 
However, top-aligned sticky positioning means that\n * the sidebar snaps to the bottom when the end of the container is reached.\n * This is what leads to the mentioned jitter, as the sidebar's height may be\n * updated too slowly.\n *\n * This behaviour can be mitigiated by setting the height of the sidebar to `0`\n * while preserving the padding, and the height on its first element.\n *\n * @param el - Sidebar element\n * @param options - Options\n *\n * @returns Sidebar component observable\n */\nexport function mountSidebar(\n el: HTMLElement, { header$, ...options }: MountOptions\n): Observable> {\n const inner = getElement(\".md-sidebar__scrollwrap\", el)\n const { y } = getElementOffset(inner)\n return defer(() => {\n const push$ = new Subject()\n push$\n .pipe(\n auditTime(0, animationFrameScheduler),\n withLatestFrom(header$)\n )\n .subscribe({\n\n /* Handle emission */\n next([{ height }, { height: offset }]) {\n inner.style.height = `${height - 2 * y}px`\n el.style.top = `${offset}px`\n },\n\n /* Handle complete */\n complete() {\n inner.style.height = \"\"\n el.style.top = \"\"\n }\n })\n\n /* Create and return component */\n return watchSidebar(el, options)\n .pipe(\n tap(state => push$.next(state)),\n finalize(() => push$.complete()),\n map(state => ({ ref: el, ...state }))\n )\n })\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS 
PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { Repo, User } from \"github-types\"\nimport {\n Observable,\n defaultIfEmpty,\n map,\n zip\n} from \"rxjs\"\n\nimport { requestJSON } from \"~/browser\"\n\nimport { SourceFacts } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * GitHub release (partial)\n */\ninterface Release {\n tag_name: string /* Tag name */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch GitHub repository facts\n *\n * @param user - GitHub user or organization\n * @param repo - GitHub repository\n *\n * @returns Repository facts observable\n */\nexport function fetchSourceFactsFromGitHub(\n user: string, repo?: string\n): Observable {\n if (typeof repo !== \"undefined\") {\n const url = `https://api.github.com/repos/${user}/${repo}`\n return zip(\n\n /* Fetch version */\n requestJSON(`${url}/releases/latest`)\n .pipe(\n map(release => ({\n version: release.tag_name\n })),\n defaultIfEmpty({})\n ),\n\n /* Fetch stars and forks */\n requestJSON(url)\n .pipe(\n map(info => ({\n stars: info.stargazers_count,\n forks: info.forks_count\n })),\n defaultIfEmpty({})\n )\n )\n .pipe(\n map(([release, info]) => ({ ...release, ...info }))\n )\n\n /* User or organization */\n } else {\n const url = 
`https://api.github.com/users/${user}`\n return requestJSON(url)\n .pipe(\n map(info => ({\n repositories: info.public_repos\n })),\n defaultIfEmpty({})\n )\n }\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { ProjectSchema } from \"gitlab\"\nimport {\n Observable,\n defaultIfEmpty,\n map\n} from \"rxjs\"\n\nimport { requestJSON } from \"~/browser\"\n\nimport { SourceFacts } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch GitLab repository facts\n *\n * @param base - GitLab base\n * @param project - GitLab project\n *\n * @returns Repository facts observable\n */\nexport function fetchSourceFactsFromGitLab(\n base: string, project: string\n): Observable {\n const url = `https://${base}/api/v4/projects/${encodeURIComponent(project)}`\n return requestJSON(url)\n .pipe(\n map(({ star_count, forks_count }) => ({\n stars: star_count,\n forks: forks_count\n })),\n defaultIfEmpty({})\n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND 
NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { EMPTY, Observable } from \"rxjs\"\n\nimport { fetchSourceFactsFromGitHub } from \"../github\"\nimport { fetchSourceFactsFromGitLab } from \"../gitlab\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Repository facts for repositories\n */\nexport interface RepositoryFacts {\n stars?: number /* Number of stars */\n forks?: number /* Number of forks */\n version?: string /* Latest version */\n}\n\n/**\n * Repository facts for organizations\n */\nexport interface OrganizationFacts {\n repositories?: number /* Number of repositories */\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Repository facts\n */\nexport type SourceFacts =\n | RepositoryFacts\n | OrganizationFacts\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch repository facts\n *\n * @param url - Repository URL\n *\n * @returns Repository facts observable\n */\nexport function fetchSourceFacts(\n url: string\n): Observable {\n const [type] = url.match(/(git(?:hub|lab))/i) || []\n switch (type.toLowerCase()) {\n\n /* GitHub repository */\n case \"github\":\n const [, user, repo] = url.match(/^.+github\\.com\\/([^/]+)\\/?([^/]+)?/i)!\n return fetchSourceFactsFromGitHub(user, repo)\n\n /* GitLab repository */\n case \"gitlab\":\n const [, base, slug] = url.match(/^.+?([^/]*gitlab[^/]+)\\/(.+?)\\/?$/i)!\n return fetchSourceFactsFromGitLab(base, slug)\n\n /* Everything else */\n 
default:\n return EMPTY\n }\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n EMPTY,\n Observable,\n Subject,\n catchError,\n defer,\n filter,\n finalize,\n map,\n of,\n shareReplay,\n tap\n} from \"rxjs\"\n\nimport { getElement } from \"~/browser\"\nimport { renderSourceFacts } from \"~/templates\"\n\nimport { Component } from \"../../_\"\nimport {\n SourceFacts,\n fetchSourceFacts\n} from \"../facts\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Repository information\n */\nexport interface Source {\n facts: SourceFacts /* Repository facts */\n}\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * 
Repository information observable\n */\nlet fetch$: Observable\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch repository information\n *\n * This function tries to read the repository facts from session storage, and\n * if unsuccessful, fetches them from the underlying provider.\n *\n * @param el - Repository information element\n *\n * @returns Repository information observable\n */\nexport function watchSource(\n el: HTMLAnchorElement\n): Observable {\n return fetch$ ||= defer(() => {\n const cached = __md_get(\"__source\", sessionStorage)\n if (cached)\n return of(cached)\n else\n return fetchSourceFacts(el.href)\n .pipe(\n tap(facts => __md_set(\"__source\", facts, sessionStorage))\n )\n })\n .pipe(\n catchError(() => EMPTY),\n filter(facts => Object.keys(facts).length > 0),\n map(facts => ({ facts })),\n shareReplay(1)\n )\n}\n\n/**\n * Mount repository information\n *\n * @param el - Repository information element\n *\n * @returns Repository information component observable\n */\nexport function mountSource(\n el: HTMLAnchorElement\n): Observable> {\n const inner = getElement(\":scope > :last-child\", el)\n return defer(() => {\n const push$ = new Subject()\n push$.subscribe(({ facts }) => {\n inner.appendChild(renderSourceFacts(facts))\n inner.setAttribute(\"data-md-state\", \"done\")\n })\n\n /* Create and return component */\n return watchSource(el)\n .pipe(\n tap(state => push$.next(state)),\n finalize(() => push$.complete()),\n map(state => ({ ref: el, ...state }))\n )\n })\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, 
publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n Subject,\n defer,\n distinctUntilKeyChanged,\n finalize,\n map,\n of,\n switchMap,\n tap\n} from \"rxjs\"\n\nimport { feature } from \"~/_\"\nimport {\n Viewport,\n watchElementSize,\n watchViewportAt\n} from \"~/browser\"\n\nimport { Component } from \"../_\"\nimport { Header } from \"../header\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Navigation tabs\n */\nexport interface Tabs {\n hidden: boolean /* Navigation tabs are hidden */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n viewport$: Observable /* Viewport observable */\n header$: Observable
    /* Header observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n viewport$: Observable /* Viewport observable */\n header$: Observable
    /* Header observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch navigation tabs\n *\n * @param el - Navigation tabs element\n * @param options - Options\n *\n * @returns Navigation tabs observable\n */\nexport function watchTabs(\n el: HTMLElement, { viewport$, header$ }: WatchOptions\n): Observable {\n return watchElementSize(document.body)\n .pipe(\n switchMap(() => watchViewportAt(el, { header$, viewport$ })),\n map(({ offset: { y } }) => {\n return {\n hidden: y >= 10\n }\n }),\n distinctUntilKeyChanged(\"hidden\")\n )\n}\n\n/**\n * Mount navigation tabs\n *\n * This function hides the navigation tabs when scrolling past the threshold\n * and makes them reappear in a nice CSS animation when scrolling back up.\n *\n * @param el - Navigation tabs element\n * @param options - Options\n *\n * @returns Navigation tabs component observable\n */\nexport function mountTabs(\n el: HTMLElement, options: MountOptions\n): Observable> {\n return defer(() => {\n const push$ = new Subject()\n push$.subscribe({\n\n /* Handle emission */\n next({ hidden }) {\n if (hidden)\n el.setAttribute(\"data-md-state\", \"hidden\")\n else\n el.removeAttribute(\"data-md-state\")\n },\n\n /* Handle complete */\n complete() {\n el.removeAttribute(\"data-md-state\")\n }\n })\n\n /* Create and return component */\n return (\n feature(\"navigation.tabs.sticky\")\n ? 
of({ hidden: false })\n : watchTabs(el, options)\n )\n .pipe(\n tap(state => push$.next(state)),\n finalize(() => push$.complete()),\n map(state => ({ ref: el, ...state }))\n )\n })\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n Subject,\n bufferCount,\n combineLatest,\n debounceTime,\n defer,\n distinctUntilChanged,\n distinctUntilKeyChanged,\n finalize,\n map,\n of,\n scan,\n startWith,\n switchMap,\n takeLast,\n takeUntil,\n tap,\n withLatestFrom\n} from \"rxjs\"\n\nimport { feature } from \"~/_\"\nimport {\n Viewport,\n getElements,\n getLocation,\n getOptionalElement,\n watchElementSize\n} from \"~/browser\"\n\nimport { Component } from \"../_\"\nimport { Header } from \"../header\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Table of contents\n */\nexport interface TableOfContents {\n prev: HTMLAnchorElement[][] /* Anchors (previous) */\n next: HTMLAnchorElement[][] /* Anchors (next) */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n viewport$: Observable /* Viewport observable */\n header$: Observable
    /* Header observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n viewport$: Observable /* Viewport observable */\n header$: Observable
    /* Header observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch table of contents\n *\n * This is effectively a scroll spy implementation which will account for the\n * fixed header and automatically re-calculate anchor offsets when the viewport\n * is resized. The returned observable will only emit if the table of contents\n * needs to be repainted.\n *\n * This implementation tracks an anchor element's entire path starting from its\n * level up to the top-most anchor element, e.g. `[h3, h2, h1]`. Although the\n * Material theme currently doesn't make use of this information, it enables\n * the styling of the entire hierarchy through customization.\n *\n * Note that the current anchor is the last item of the `prev` anchor list.\n *\n * @param el - Table of contents element\n * @param options - Options\n *\n * @returns Table of contents observable\n */\nexport function watchTableOfContents(\n el: HTMLElement, { viewport$, header$ }: WatchOptions\n): Observable {\n const table = new Map()\n\n /* Compute anchor-to-target mapping */\n const anchors = getElements(\"[href^=\\\\#]\", el)\n for (const anchor of anchors) {\n const id = decodeURIComponent(anchor.hash.substring(1))\n const target = getOptionalElement(`[id=\"${id}\"]`)\n if (typeof target !== \"undefined\")\n table.set(anchor, target)\n }\n\n /* Compute necessary adjustment for header */\n const adjust$ = header$\n .pipe(\n map(header => 24 + header.height)\n )\n\n /* Compute partition of previous and next anchors */\n const partition$ = watchElementSize(document.body)\n .pipe(\n distinctUntilKeyChanged(\"height\"),\n\n /* Build index to map anchor paths to vertical offsets */\n switchMap(body => defer(() => {\n let path: HTMLAnchorElement[] = []\n return of([...table].reduce((index, [anchor, target]) => {\n while (path.length) {\n const last = 
table.get(path[path.length - 1])!\n if (last.tagName >= target.tagName) {\n path.pop()\n } else {\n break\n }\n }\n\n /* If the current anchor is hidden, continue with its parent */\n let offset = target.offsetTop\n while (!offset && target.parentElement) {\n target = target.parentElement\n offset = target.offsetTop\n }\n\n /* Map reversed anchor path to vertical offset */\n return index.set(\n [...path = [...path, anchor]].reverse(),\n offset\n )\n }, new Map()))\n })\n .pipe(\n\n /* Sort index by vertical offset (see https://bit.ly/30z6QSO) */\n map(index => new Map([...index].sort(([, a], [, b]) => a - b))),\n\n /* Re-compute partition when viewport offset changes */\n switchMap(index => combineLatest([viewport$, adjust$])\n .pipe(\n scan(([prev, next], [{ offset: { y }, size }, adjust]) => {\n const last = y + size.height >= Math.floor(body.height)\n\n /* Look forward */\n while (next.length) {\n const [, offset] = next[0]\n if (offset - adjust < y || last) {\n prev = [...prev, next.shift()!]\n } else {\n break\n }\n }\n\n /* Look backward */\n while (prev.length) {\n const [, offset] = prev[prev.length - 1]\n if (offset - adjust >= y && !last) {\n next = [prev.pop()!, ...next]\n } else {\n break\n }\n }\n\n /* Return partition */\n return [prev, next]\n }, [[], [...index]]),\n distinctUntilChanged((a, b) => (\n a[0] === b[0] &&\n a[1] === b[1]\n ))\n )\n )\n )\n )\n )\n\n /* Compute and return anchor list migrations */\n return partition$\n .pipe(\n map(([prev, next]) => ({\n prev: prev.map(([path]) => path),\n next: next.map(([path]) => path)\n })),\n\n /* Extract anchor list migrations */\n startWith({ prev: [], next: [] }),\n bufferCount(2, 1),\n map(([a, b]) => {\n\n /* Moving down */\n if (a.prev.length < b.prev.length) {\n return {\n prev: b.prev.slice(Math.max(0, a.prev.length - 1), b.prev.length),\n next: []\n }\n\n /* Moving up */\n } else {\n return {\n prev: b.prev.slice(-1),\n next: b.next.slice(0, b.next.length - a.next.length)\n }\n }\n })\n 
)\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Mount table of contents\n *\n * @param el - Table of contents element\n * @param options - Options\n *\n * @returns Table of contents component observable\n */\nexport function mountTableOfContents(\n el: HTMLElement, { viewport$, header$ }: MountOptions\n): Observable> {\n return defer(() => {\n const push$ = new Subject()\n push$.subscribe(({ prev, next }) => {\n\n /* Look forward */\n for (const [anchor] of next) {\n anchor.removeAttribute(\"data-md-state\")\n anchor.classList.remove(\n \"md-nav__link--active\"\n )\n }\n\n /* Look backward */\n for (const [index, [anchor]] of prev.entries()) {\n anchor.setAttribute(\"data-md-state\", \"blur\")\n anchor.classList.toggle(\n \"md-nav__link--active\",\n index === prev.length - 1\n )\n }\n })\n\n /* Set up anchor tracking, if enabled */\n if (feature(\"navigation.tracking\"))\n viewport$\n .pipe(\n takeUntil(push$.pipe(takeLast(1))),\n distinctUntilKeyChanged(\"offset\"),\n debounceTime(250),\n withLatestFrom(push$)\n )\n .subscribe(([, { prev }]) => {\n const url = getLocation()\n\n /* Set hash fragment to active anchor */\n const anchor = prev[prev.length - 1]\n if (anchor && anchor.length) {\n const [active] = anchor\n const { hash } = new URL(active.href)\n if (url.hash !== hash) {\n url.hash = hash\n history.replaceState({}, \"\", `${url}`)\n }\n\n /* Reset anchor when at the top */\n } else {\n url.hash = \"\"\n history.replaceState({}, \"\", `${url}`)\n }\n })\n\n /* Create and return component */\n return watchTableOfContents(el, { viewport$, header$ })\n .pipe(\n tap(state => push$.next(state)),\n finalize(() => push$.complete()),\n map(state => ({ ref: el, ...state }))\n )\n })\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the 
Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n Subject,\n bufferCount,\n combineLatest,\n distinctUntilChanged,\n distinctUntilKeyChanged,\n endWith,\n finalize,\n map,\n repeat,\n skip,\n takeLast,\n takeUntil,\n tap\n} from \"rxjs\"\n\nimport { Viewport } from \"~/browser\"\n\nimport { Component } from \"../_\"\nimport { Header } from \"../header\"\nimport { Main } from \"../main\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Back-to-top button\n */\nexport interface BackToTop {\n hidden: boolean /* Back-to-top button is hidden */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n viewport$: Observable /* Viewport observable */\n main$: Observable
    /* Main area observable */\n target$: Observable /* Location target observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n viewport$: Observable /* Viewport observable */\n header$: Observable
    /* Header observable */\n main$: Observable
    /* Main area observable */\n target$: Observable /* Location target observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch back-to-top\n *\n * @param _el - Back-to-top element\n * @param options - Options\n *\n * @returns Back-to-top observable\n */\nexport function watchBackToTop(\n _el: HTMLElement, { viewport$, main$, target$ }: WatchOptions\n): Observable {\n\n /* Compute direction */\n const direction$ = viewport$\n .pipe(\n map(({ offset: { y } }) => y),\n bufferCount(2, 1),\n map(([a, b]) => a > b && b > 0),\n distinctUntilChanged()\n )\n\n /* Compute whether main area is active */\n const active$ = main$\n .pipe(\n map(({ active }) => active)\n )\n\n /* Compute threshold for hiding */\n return combineLatest([active$, direction$])\n .pipe(\n map(([active, direction]) => !(active && direction)),\n distinctUntilChanged(),\n takeUntil(target$.pipe(skip(1))),\n endWith(true),\n repeat({ delay: 250 }),\n map(hidden => ({ hidden }))\n )\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Mount back-to-top\n *\n * @param el - Back-to-top element\n * @param options - Options\n *\n * @returns Back-to-top component observable\n */\nexport function mountBackToTop(\n el: HTMLElement, { viewport$, header$, main$, target$ }: MountOptions\n): Observable> {\n const push$ = new Subject()\n push$.subscribe({\n\n /* Handle emission */\n next({ hidden }) {\n if (hidden) {\n el.setAttribute(\"data-md-state\", \"hidden\")\n el.setAttribute(\"tabindex\", \"-1\")\n el.blur()\n } else {\n el.removeAttribute(\"data-md-state\")\n el.removeAttribute(\"tabindex\")\n }\n },\n\n /* Handle complete */\n complete() {\n el.style.top = \"\"\n el.setAttribute(\"data-md-state\", \"hidden\")\n el.removeAttribute(\"tabindex\")\n }\n })\n\n /* Watch header height */\n header$\n .pipe(\n 
takeUntil(push$.pipe(endWith(0), takeLast(1))),\n distinctUntilKeyChanged(\"height\")\n )\n .subscribe(({ height }) => {\n el.style.top = `${height + 16}px`\n })\n\n /* Create and return component */\n return watchBackToTop(el, { viewport$, main$, target$ })\n .pipe(\n tap(state => push$.next(state)),\n finalize(() => push$.complete()),\n map(state => ({ ref: el, ...state }))\n )\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n fromEvent,\n mapTo,\n mergeMap,\n of,\n switchMap,\n takeWhile,\n tap,\n withLatestFrom\n} from \"rxjs\"\n\nimport { getElements } from \"~/browser\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Patch options\n */\ninterface PatchOptions {\n document$: Observable /* Document observable */\n tablet$: Observable /* Media tablet observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Patch indeterminate checkboxes\n *\n * This function replaces the indeterminate \"pseudo state\" with the actual\n * indeterminate state, which is used to keep navigation always expanded.\n *\n * @param options - Options\n */\nexport function patchIndeterminate(\n { document$, tablet$ }: PatchOptions\n): void {\n document$\n .pipe(\n switchMap(() => of(...getElements(\n \"[data-md-state=indeterminate]\"\n ))),\n tap(el => {\n el.indeterminate = true\n el.checked = false\n }),\n mergeMap(el => fromEvent(el, \"change\")\n .pipe(\n takeWhile(() => el.hasAttribute(\"data-md-state\")),\n mapTo(el)\n )\n ),\n withLatestFrom(tablet$)\n )\n .subscribe(([el, tablet]) => {\n el.removeAttribute(\"data-md-state\")\n if (tablet)\n el.checked = false\n })\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without 
restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n filter,\n fromEvent,\n mapTo,\n mergeMap,\n of,\n switchMap,\n tap\n} from \"rxjs\"\n\nimport { getElements } from \"~/browser\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Patch options\n */\ninterface PatchOptions {\n document$: Observable /* Document observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Check whether the given device is an Apple device\n *\n * @returns Test result\n */\nfunction isAppleDevice(): boolean {\n return /(iPad|iPhone|iPod)/.test(navigator.userAgent)\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Patch all elements with `data-md-scrollfix` attributes\n *\n * This is a year-old 
patch which ensures that overflow scrolling works at the\n * top and bottom of containers on iOS by ensuring a `1px` scroll offset upon\n * the start of a touch event.\n *\n * @see https://bit.ly/2SCtAOO - Original source\n *\n * @param options - Options\n */\nexport function patchScrollfix(\n { document$ }: PatchOptions\n): void {\n document$\n .pipe(\n switchMap(() => of(...getElements(\"[data-md-scrollfix]\"))),\n tap(el => el.removeAttribute(\"data-md-scrollfix\")),\n filter(isAppleDevice),\n mergeMap(el => fromEvent(el, \"touchstart\")\n .pipe(\n mapTo(el)\n )\n )\n )\n .subscribe(el => {\n const top = el.scrollTop\n\n /* We're at the top of the container */\n if (top === 0) {\n el.scrollTop = 1\n\n /* We're at the bottom of the container */\n } else if (top + el.offsetHeight === el.scrollHeight) {\n el.scrollTop = top - 1\n }\n })\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n Observable,\n combineLatest,\n delay,\n map,\n of,\n switchMap,\n withLatestFrom\n} from \"rxjs\"\n\nimport {\n Viewport,\n watchToggle\n} from \"~/browser\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Patch options\n */\ninterface PatchOptions {\n viewport$: Observable /* Viewport observable */\n tablet$: Observable /* Media tablet observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Patch the document body to lock when search is open\n *\n * For mobile and tablet viewports, the search is rendered full screen, which\n * leads to scroll leaking when at the top or bottom of the search result. This\n * function locks the body when the search is in full screen mode, and restores\n * the scroll position when leaving.\n *\n * @param options - Options\n */\nexport function patchScrolllock(\n { viewport$, tablet$ }: PatchOptions\n): void {\n combineLatest([watchToggle(\"search\"), tablet$])\n .pipe(\n map(([active, tablet]) => active && !tablet),\n switchMap(active => of(active)\n .pipe(\n delay(active ? 
400 : 100)\n )\n ),\n withLatestFrom(viewport$)\n )\n .subscribe(([active, { offset: { y }}]) => {\n if (active) {\n document.body.setAttribute(\"data-md-state\", \"lock\")\n document.body.style.top = `-${y}px`\n } else {\n const value = -1 * parseInt(document.body.style.top, 10)\n document.body.removeAttribute(\"data-md-state\")\n document.body.style.top = \"\"\n if (value)\n window.scrollTo(0, value)\n }\n })\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n/* ----------------------------------------------------------------------------\n * Polyfills\n * ------------------------------------------------------------------------- */\n\n/* Polyfill `Object.entries` */\nif (!Object.entries)\n Object.entries = function (obj: object) {\n const data: [string, string][] = []\n for (const key of Object.keys(obj))\n // @ts-expect-error - ignore property access warning\n data.push([key, obj[key]])\n\n /* Return entries */\n return data\n }\n\n/* Polyfill `Object.values` */\nif (!Object.values)\n Object.values = function (obj: object) {\n const data: string[] = []\n for (const key of Object.keys(obj))\n // @ts-expect-error - ignore property access warning\n data.push(obj[key])\n\n /* Return values */\n return data\n }\n\n/* ------------------------------------------------------------------------- */\n\n/* Polyfills for `Element` */\nif (typeof Element !== \"undefined\") {\n\n /* Polyfill `Element.scrollTo` */\n if (!Element.prototype.scrollTo)\n Element.prototype.scrollTo = function (\n x?: ScrollToOptions | number, y?: number\n ): void {\n if (typeof x === \"object\") {\n this.scrollLeft = x.left!\n this.scrollTop = x.top!\n } else {\n this.scrollLeft = x!\n this.scrollTop = y!\n }\n }\n\n /* Polyfill `Element.replaceWith` */\n if (!Element.prototype.replaceWith)\n Element.prototype.replaceWith = function (\n ...nodes: Array\n ): void {\n const parent = this.parentNode\n if (parent) {\n if (nodes.length === 0)\n parent.removeChild(this)\n\n /* Replace children and create text nodes */\n for (let i = nodes.length - 1; i >= 0; i--) {\n let node = nodes[i]\n if (typeof node !== \"object\")\n node = document.createTextNode(node)\n else if 
(node.parentNode)\n node.parentNode.removeChild(node)\n\n /* Replace child or insert before previous sibling */\n if (!i)\n parent.replaceChild(node, this)\n else\n parent.insertBefore(this.previousSibling!, node)\n }\n }\n }\n}\n"], - "mappings": "4iCAAA,oBAAC,UAAU,EAAQ,EAAS,CAC1B,MAAO,KAAY,UAAY,MAAO,KAAW,YAAc,IAC/D,MAAO,SAAW,YAAc,OAAO,IAAM,OAAO,GACnD,MACD,GAAO,UAAY,CAAE,aASrB,WAAmC,EAAO,CACxC,GAAI,GAAmB,GACnB,EAA0B,GAC1B,EAAiC,KAEjC,EAAsB,CACxB,KAAM,GACN,OAAQ,GACR,IAAK,GACL,IAAK,GACL,MAAO,GACP,SAAU,GACV,OAAQ,GACR,KAAM,GACN,MAAO,GACP,KAAM,GACN,KAAM,GACN,SAAU,GACV,iBAAkB,IAQpB,WAA4B,EAAI,CAC9B,MACE,MACA,IAAO,UACP,EAAG,WAAa,QAChB,EAAG,WAAa,QAChB,aAAe,IACf,YAAc,GAAG,WAcrB,WAAuC,EAAI,CACzC,GAAI,IAAO,EAAG,KACV,GAAU,EAAG,QAUjB,MARI,QAAY,SAAW,EAAoB,KAAS,CAAC,EAAG,UAIxD,KAAY,YAAc,CAAC,EAAG,UAI9B,EAAG,mBAYT,WAA8B,EAAI,CAChC,AAAI,EAAG,UAAU,SAAS,kBAG1B,GAAG,UAAU,IAAI,iBACjB,EAAG,aAAa,2BAA4B,KAQ9C,WAAiC,EAAI,CACnC,AAAI,CAAC,EAAG,aAAa,6BAGrB,GAAG,UAAU,OAAO,iBACpB,EAAG,gBAAgB,6BAWrB,WAAmB,EAAG,CACpB,AAAI,EAAE,SAAW,EAAE,QAAU,EAAE,SAI3B,GAAmB,EAAM,gBAC3B,EAAqB,EAAM,eAG7B,EAAmB,IAWrB,WAAuB,EAAG,CACxB,EAAmB,GAUrB,WAAiB,EAAG,CAElB,AAAI,CAAC,EAAmB,EAAE,SAItB,IAAoB,EAA8B,EAAE,UACtD,EAAqB,EAAE,QAQ3B,WAAgB,EAAG,CACjB,AAAI,CAAC,EAAmB,EAAE,SAKxB,GAAE,OAAO,UAAU,SAAS,kBAC5B,EAAE,OAAO,aAAa,8BAMtB,GAA0B,GAC1B,OAAO,aAAa,GACpB,EAAiC,OAAO,WAAW,UAAW,CAC5D,EAA0B,IACzB,KACH,EAAwB,EAAE,SAS9B,WAA4B,EAAG,CAC7B,AAAI,SAAS,kBAAoB,UAK3B,IACF,GAAmB,IAErB,KAUJ,YAA0C,CACxC,SAAS,iBAAiB,YAAa,GACvC,SAAS,iBAAiB,YAAa,GACvC,SAAS,iBAAiB,UAAW,GACrC,SAAS,iBAAiB,cAAe,GACzC,SAAS,iBAAiB,cAAe,GACzC,SAAS,iBAAiB,YAAa,GACvC,SAAS,iBAAiB,YAAa,GACvC,SAAS,iBAAiB,aAAc,GACxC,SAAS,iBAAiB,WAAY,GAGxC,YAA6C,CAC3C,SAAS,oBAAoB,YAAa,GAC1C,SAAS,oBAAoB,YAAa,GAC1C,SAAS,oBAAoB,UAAW,GACxC,SAAS,oBAAoB,cAAe,GAC5C,SAAS,oBAAoB,cAAe,GAC5C,SAAS,oBAAoB,YAAa,GAC1C,SAAS,oBAAoB,YAAa,GAC1C,SAAS,oBAAoB,aAAc,GAC3C,SAAS,oBAAoB,WAAY,GAU3C,WAA8B,EAAG,CAG/B,AAAI,EAAE,OAAO,UAAY,EAAE,OAAO,SAAS,gBAAkB,QAI7D,GAAmB,GACnB,KAMF,SAAS,iBAAiB,UAAW,EAAW,IAChD,SAAS,iBAAi
B,YAAa,EAAe,IACtD,SAAS,iBAAiB,cAAe,EAAe,IACxD,SAAS,iBAAiB,aAAc,EAAe,IACvD,SAAS,iBAAiB,mBAAoB,EAAoB,IAElE,IAMA,EAAM,iBAAiB,QAAS,EAAS,IACzC,EAAM,iBAAiB,OAAQ,EAAQ,IAOvC,AAAI,EAAM,WAAa,KAAK,wBAA0B,EAAM,KAI1D,EAAM,KAAK,aAAa,wBAAyB,IACxC,EAAM,WAAa,KAAK,eACjC,UAAS,gBAAgB,UAAU,IAAI,oBACvC,SAAS,gBAAgB,aAAa,wBAAyB,KAOnE,GAAI,MAAO,SAAW,aAAe,MAAO,WAAa,YAAa,CAIpE,OAAO,0BAA4B,EAInC,GAAI,GAEJ,GAAI,CACF,EAAQ,GAAI,aAAY,sCACjB,EAAP,CAEA,EAAQ,SAAS,YAAY,eAC7B,EAAM,gBAAgB,+BAAgC,GAAO,GAAO,IAGtE,OAAO,cAAc,GAGvB,AAAI,MAAO,WAAa,aAGtB,EAA0B,cCpT9B,eAAC,UAAS,EAAQ,CAOhB,GAAI,GAA6B,UAAW,CAC1C,GAAI,CACF,MAAO,CAAC,CAAC,OAAO,eACT,EAAP,CACA,MAAO,KAKP,EAAoB,IAEpB,EAAiB,SAAS,EAAO,CACnC,GAAI,GAAW,CACb,KAAM,UAAW,CACf,GAAI,GAAQ,EAAM,QAClB,MAAO,CAAE,KAAM,IAAU,OAAQ,MAAO,KAI5C,MAAI,IACF,GAAS,OAAO,UAAY,UAAW,CACrC,MAAO,KAIJ,GAOL,EAAiB,SAAS,EAAO,CACnC,MAAO,oBAAmB,GAAO,QAAQ,OAAQ,MAG/C,EAAmB,SAAS,EAAO,CACrC,MAAO,oBAAmB,OAAO,GAAO,QAAQ,MAAO,OAGrD,EAA0B,UAAW,CAEvC,GAAI,GAAkB,SAAS,EAAc,CAC3C,OAAO,eAAe,KAAM,WAAY,CAAE,SAAU,GAAM,MAAO,KACjE,GAAI,GAAqB,MAAO,GAEhC,GAAI,IAAuB,YAEpB,GAAI,IAAuB,SAChC,AAAI,IAAiB,IACnB,KAAK,YAAY,WAEV,YAAwB,GAAiB,CAClD,GAAI,GAAQ,KACZ,EAAa,QAAQ,SAAS,EAAO,EAAM,CACzC,EAAM,OAAO,EAAM,aAEX,IAAiB,MAAU,IAAuB,SAC5D,GAAI,OAAO,UAAU,SAAS,KAAK,KAAkB,iBACnD,OAAS,GAAI,EAAG,EAAI,EAAa,OAAQ,IAAK,CAC5C,GAAI,GAAQ,EAAa,GACzB,GAAK,OAAO,UAAU,SAAS,KAAK,KAAW,kBAAsB,EAAM,SAAW,EACpF,KAAK,OAAO,EAAM,GAAI,EAAM,QAE5B,MAAM,IAAI,WAAU,4CAA8C,EAAI,mCAI1E,QAAS,KAAO,GACd,AAAI,EAAa,eAAe,IAC9B,KAAK,OAAO,EAAK,EAAa,QAKpC,MAAM,IAAI,WAAU,iDAIpB,EAAQ,EAAgB,UAE5B,EAAM,OAAS,SAAS,EAAM,EAAO,CACnC,AAAI,IAAQ,MAAK,SACf,KAAK,SAAS,GAAM,KAAK,OAAO,IAEhC,KAAK,SAAS,GAAQ,CAAC,OAAO,KAIlC,EAAM,OAAS,SAAS,EAAM,CAC5B,MAAO,MAAK,SAAS,IAGvB,EAAM,IAAM,SAAS,EAAM,CACzB,MAAQ,KAAQ,MAAK,SAAY,KAAK,SAAS,GAAM,GAAK,MAG5D,EAAM,OAAS,SAAS,EAAM,CAC5B,MAAQ,KAAQ,MAAK,SAAY,KAAK,SAAS,GAAM,MAAM,GAAK,IAGlE,EAAM,IAAM,SAAS,EAAM,CACzB,MAAQ,KAAQ,MAAK,UAGvB,EAAM,IAAM,SAAS,EAAM,EAAO,CAChC,KAAK,SAAS,GAAQ,CAAC,OAAO,KAGhC,EAAM,QAAU,SAAS,EAAU,EAAS,CAC1C,GAAI,GACJ,OAAS,KAAQ,MAAK,SACpB,GA
AI,KAAK,SAAS,eAAe,GAAO,CACtC,EAAU,KAAK,SAAS,GACxB,OAAS,GAAI,EAAG,EAAI,EAAQ,OAAQ,IAClC,EAAS,KAAK,EAAS,EAAQ,GAAI,EAAM,QAMjD,EAAM,KAAO,UAAW,CACtB,GAAI,GAAQ,GACZ,YAAK,QAAQ,SAAS,EAAO,EAAM,CACjC,EAAM,KAAK,KAEN,EAAe,IAGxB,EAAM,OAAS,UAAW,CACxB,GAAI,GAAQ,GACZ,YAAK,QAAQ,SAAS,EAAO,CAC3B,EAAM,KAAK,KAEN,EAAe,IAGxB,EAAM,QAAU,UAAW,CACzB,GAAI,GAAQ,GACZ,YAAK,QAAQ,SAAS,EAAO,EAAM,CACjC,EAAM,KAAK,CAAC,EAAM,MAEb,EAAe,IAGpB,GACF,GAAM,OAAO,UAAY,EAAM,SAGjC,EAAM,SAAW,UAAW,CAC1B,GAAI,GAAc,GAClB,YAAK,QAAQ,SAAS,EAAO,EAAM,CACjC,EAAY,KAAK,EAAe,GAAQ,IAAM,EAAe,MAExD,EAAY,KAAK,MAI1B,EAAO,gBAAkB,GAGvB,EAAkC,UAAW,CAC/C,GAAI,CACF,GAAI,GAAkB,EAAO,gBAE7B,MACG,IAAI,GAAgB,QAAQ,aAAe,OAC3C,MAAO,GAAgB,UAAU,KAAQ,YACzC,MAAO,GAAgB,UAAU,SAAY,iBAEzC,EAAP,CACA,MAAO,KAIX,AAAK,KACH,IAGF,GAAI,GAAQ,EAAO,gBAAgB,UAEnC,AAAI,MAAO,GAAM,MAAS,YACxB,GAAM,KAAO,UAAW,CACtB,GAAI,GAAQ,KACR,EAAQ,GACZ,KAAK,QAAQ,SAAS,EAAO,EAAM,CACjC,EAAM,KAAK,CAAC,EAAM,IACb,EAAM,UACT,EAAM,OAAO,KAGjB,EAAM,KAAK,SAAS,EAAG,EAAG,CACxB,MAAI,GAAE,GAAK,EAAE,GACJ,GACE,EAAE,GAAK,EAAE,GACX,EAEA,IAGP,EAAM,UACR,GAAM,SAAW,IAEnB,OAAS,GAAI,EAAG,EAAI,EAAM,OAAQ,IAChC,KAAK,OAAO,EAAM,GAAG,GAAI,EAAM,GAAG,MAKpC,MAAO,GAAM,aAAgB,YAC/B,OAAO,eAAe,EAAO,cAAe,CAC1C,WAAY,GACZ,aAAc,GACd,SAAU,GACV,MAAO,SAAS,EAAc,CAC5B,GAAI,KAAK,SACP,KAAK,SAAW,OACX,CACL,GAAI,GAAO,GACX,KAAK,QAAQ,SAAS,EAAO,EAAM,CACjC,EAAK,KAAK,KAEZ,OAAS,GAAI,EAAG,EAAI,EAAK,OAAQ,IAC/B,KAAK,OAAO,EAAK,IAIrB,EAAe,EAAa,QAAQ,MAAO,IAG3C,OAFI,GAAa,EAAa,MAAM,KAChC,EACK,EAAI,EAAG,EAAI,EAAW,OAAQ,IACrC,EAAY,EAAW,GAAG,MAAM,KAChC,KAAK,OACH,EAAiB,EAAU,IAC1B,EAAU,OAAS,EAAK,EAAiB,EAAU,IAAM,SAUnE,MAAO,SAAW,YAAe,OAC5B,MAAO,SAAW,YAAe,OACjC,MAAO,OAAS,YAAe,KAAO,IAG9C,AAAC,UAAS,EAAQ,CAOhB,GAAI,GAAwB,UAAW,CACrC,GAAI,CACF,GAAI,GAAI,GAAI,GAAO,IAAI,IAAK,YAC5B,SAAE,SAAW,MACL,EAAE,OAAS,kBAAqB,EAAE,mBACnC,EAAP,CACA,MAAO,KAKP,EAAc,UAAW,CAC3B,GAAI,GAAO,EAAO,IAEd,EAAM,SAAS,EAAK,EAAM,CAC5B,AAAI,MAAO,IAAQ,UAAU,GAAM,OAAO,IACtC,GAAQ,MAAO,IAAS,UAAU,GAAO,OAAO,IAGpD,GAAI,GAAM,SAAU,EACpB,GAAI,GAAS,GAAO,WAAa,QAAU,IAAS,EAAO,SAAS,MAAO,CACzE,EAAO,EAAK,cACZ,EAAM,
SAAS,eAAe,mBAAmB,IACjD,EAAc,EAAI,cAAc,QAChC,EAAY,KAAO,EACnB,EAAI,KAAK,YAAY,GACrB,GAAI,CACF,GAAI,EAAY,KAAK,QAAQ,KAAU,EAAG,KAAM,IAAI,OAAM,EAAY,YAC/D,EAAP,CACA,KAAM,IAAI,OAAM,0BAA4B,EAAO,WAAa,IAIpE,GAAI,GAAgB,EAAI,cAAc,KACtC,EAAc,KAAO,EACjB,GACF,GAAI,KAAK,YAAY,GACrB,EAAc,KAAO,EAAc,MAGrC,GAAI,GAAe,EAAI,cAAc,SAIrC,GAHA,EAAa,KAAO,MACpB,EAAa,MAAQ,EAEjB,EAAc,WAAa,KAAO,CAAC,IAAI,KAAK,EAAc,OAAU,CAAC,EAAa,iBAAmB,CAAC,EACxG,KAAM,IAAI,WAAU,eAGtB,OAAO,eAAe,KAAM,iBAAkB,CAC5C,MAAO,IAKT,GAAI,GAAe,GAAI,GAAO,gBAAgB,KAAK,QAC/C,EAAqB,GACrB,EAA2B,GAC3B,EAAQ,KACZ,CAAC,SAAU,SAAU,OAAO,QAAQ,SAAS,EAAY,CACvD,GAAI,IAAS,EAAa,GAC1B,EAAa,GAAc,UAAW,CACpC,GAAO,MAAM,EAAc,WACvB,GACF,GAA2B,GAC3B,EAAM,OAAS,EAAa,WAC5B,EAA2B,OAKjC,OAAO,eAAe,KAAM,eAAgB,CAC1C,MAAO,EACP,WAAY,KAGd,GAAI,GAAS,OACb,OAAO,eAAe,KAAM,sBAAuB,CACjD,WAAY,GACZ,aAAc,GACd,SAAU,GACV,MAAO,UAAW,CAChB,AAAI,KAAK,SAAW,GAClB,GAAS,KAAK,OACV,GACF,GAAqB,GACrB,KAAK,aAAa,YAAY,KAAK,QACnC,EAAqB,SAO3B,EAAQ,EAAI,UAEZ,EAA6B,SAAS,EAAe,CACvD,OAAO,eAAe,EAAO,EAAe,CAC1C,IAAK,UAAW,CACd,MAAO,MAAK,eAAe,IAE7B,IAAK,SAAS,EAAO,CACnB,KAAK,eAAe,GAAiB,GAEvC,WAAY,MAIhB,CAAC,OAAQ,OAAQ,WAAY,OAAQ,YAClC,QAAQ,SAAS,EAAe,CAC/B,EAA2B,KAG/B,OAAO,eAAe,EAAO,SAAU,CACrC,IAAK,UAAW,CACd,MAAO,MAAK,eAAe,QAE7B,IAAK,SAAS,EAAO,CACnB,KAAK,eAAe,OAAY,EAChC,KAAK,uBAEP,WAAY,KAGd,OAAO,iBAAiB,EAAO,CAE7B,SAAY,CACV,IAAK,UAAW,CACd,GAAI,GAAQ,KACZ,MAAO,WAAW,CAChB,MAAO,GAAM,QAKnB,KAAQ,CACN,IAAK,UAAW,CACd,MAAO,MAAK,eAAe,KAAK,QAAQ,MAAO,KAEjD,IAAK,SAAS,EAAO,CACnB,KAAK,eAAe,KAAO,EAC3B,KAAK,uBAEP,WAAY,IAGd,SAAY,CACV,IAAK,UAAW,CACd,MAAO,MAAK,eAAe,SAAS,QAAQ,SAAU,MAExD,IAAK,SAAS,EAAO,CACnB,KAAK,eAAe,SAAW,GAEjC,WAAY,IAGd,OAAU,CACR,IAAK,UAAW,CAEd,GAAI,GAAe,CAAE,QAAS,GAAI,SAAU,IAAK,OAAQ,IAAK,KAAK,eAAe,UAI9E,EAAkB,KAAK,eAAe,MAAQ,GAChD,KAAK,eAAe,OAAS,GAE/B,MAAO,MAAK,eAAe,SACzB,KACA,KAAK,eAAe,SACnB,GAAmB,IAAM,KAAK,eAAe,KAAQ,KAE1D,WAAY,IAGd,SAAY,CACV,IAAK,UAAW,CACd,MAAO,IAET,IAAK,SAAS,EAAO,GAErB,WAAY,IAGd,SAAY,CACV,IAAK,UAAW,CACd,MAAO,IAET,IAAK,SAAS,EAAO,GAErB,WAAY,MAIhB,EAAI,gBAAkB,SAAS,EAAM,CACnC,MAAO,GAAK,gBAAgB,MA
AM,EAAM,YAG1C,EAAI,gBAAkB,SAAS,EAAK,CAClC,MAAO,GAAK,gBAAgB,MAAM,EAAM,YAG1C,EAAO,IAAM,GAQf,GAJK,KACH,IAGG,EAAO,WAAa,QAAW,CAAE,WAAY,GAAO,UAAW,CAClE,GAAI,GAAY,UAAW,CACzB,MAAO,GAAO,SAAS,SAAW,KAAO,EAAO,SAAS,SAAY,GAAO,SAAS,KAAQ,IAAM,EAAO,SAAS,KAAQ,KAG7H,GAAI,CACF,OAAO,eAAe,EAAO,SAAU,SAAU,CAC/C,IAAK,EACL,WAAY,WAEP,EAAP,CACA,YAAY,UAAW,CACrB,EAAO,SAAS,OAAS,KACxB,SAKN,MAAO,SAAW,YAAe,OAC5B,MAAO,SAAW,YAAe,OACjC,MAAO,OAAS,YAAe,KAAO,MC3e9C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gFAeA,GAAI,IACA,GACA,GACA,GACA,GACA,GACA,GACA,GACA,GACA,GACA,GACA,GACA,GACA,GACA,GACA,GACA,GACA,GACA,GACA,GACA,GACA,GACA,GACA,GACJ,AAAC,UAAU,EAAS,CAChB,GAAI,GAAO,MAAO,SAAW,SAAW,OAAS,MAAO,OAAS,SAAW,KAAO,MAAO,OAAS,SAAW,KAAO,GACrH,AAAI,MAAO,SAAW,YAAc,OAAO,IACvC,OAAO,QAAS,CAAC,WAAY,SAAU,EAAS,CAAE,EAAQ,EAAe,EAAM,EAAe,OAE7F,AAAI,MAAO,KAAW,UAAY,MAAO,IAAO,SAAY,SAC7D,EAAQ,EAAe,EAAM,EAAe,GAAO,WAGnD,EAAQ,EAAe,IAE3B,WAAwB,EAAS,EAAU,CACvC,MAAI,KAAY,GACZ,CAAI,MAAO,QAAO,QAAW,WACzB,OAAO,eAAe,EAAS,aAAc,CAAE,MAAO,KAGtD,EAAQ,WAAa,IAGtB,SAAU,EAAI,EAAG,CAAE,MAAO,GAAQ,GAAM,EAAW,EAAS,EAAI,GAAK,MAGnF,SAAU,EAAU,CACjB,GAAI,GAAgB,OAAO,gBACtB,CAAE,UAAW,aAAgB,QAAS,SAAU,EAAG,EAAG,CAAE,EAAE,UAAY,IACvE,SAAU,EAAG,EAAG,CAAE,OAAS,KAAK,GAAG,AAAI,OAAO,UAAU,eAAe,KAAK,EAAG,IAAI,GAAE,GAAK,EAAE,KAEhG,GAAY,SAAU,EAAG,EAAG,CACxB,GAAI,MAAO,IAAM,YAAc,IAAM,KACjC,KAAM,IAAI,WAAU,uBAAyB,OAAO,GAAK,iCAC7D,EAAc,EAAG,GACjB,YAAc,CAAE,KAAK,YAAc,EACnC,EAAE,UAAY,IAAM,KAAO,OAAO,OAAO,GAAM,GAAG,UAAY,EAAE,UAAW,GAAI,KAGnF,GAAW,OAAO,QAAU,SAAU,EAAG,CACrC,OAAS,GAAG,EAAI,EAAG,EAAI,UAAU,OAAQ,EAAI,EAAG,IAAK,CACjD,EAAI,UAAU,GACd,OAAS,KAAK,GAAG,AAAI,OAAO,UAAU,eAAe,KAAK,EAAG,IAAI,GAAE,GAAK,EAAE,IAE9E,MAAO,IAGX,GAAS,SAAU,EAAG,EAAG,CACrB,GAAI,GAAI,GACR,OAAS,KAAK,GAAG,AAAI,OAAO,UAAU,eAAe,KAAK,EAAG,IAAM,EAAE,QAAQ,GAAK,GAC9E,GAAE,GAAK,EAAE,IACb,GAAI,GAAK,MAAQ,MAAO,QAAO,uBAA0B,WACrD,OAAS,GAAI,EAAG,EAAI,OAAO,sBAAsB,GAAI,EAAI,EAAE,OAAQ,IAC/D,AAAI,EAAE,QAAQ,EAAE,IAAM,GAAK,OAAO,UAAU,qBAAqB,KAAK,EAAG,EAAE,KACvE,GAAE,EAAE,IAAM,EAAE,EAAE,KAE1B,MAAO,IAGX,GAAa,SAAU,
EAAY,EAAQ,EAAK,EAAM,CAClD,GAAI,GAAI,UAAU,OAAQ,EAAI,EAAI,EAAI,EAAS,IAAS,KAAO,EAAO,OAAO,yBAAyB,EAAQ,GAAO,EAAM,EAC3H,GAAI,MAAO,UAAY,UAAY,MAAO,SAAQ,UAAa,WAAY,EAAI,QAAQ,SAAS,EAAY,EAAQ,EAAK,OACpH,QAAS,GAAI,EAAW,OAAS,EAAG,GAAK,EAAG,IAAK,AAAI,GAAI,EAAW,KAAI,GAAK,GAAI,EAAI,EAAE,GAAK,EAAI,EAAI,EAAE,EAAQ,EAAK,GAAK,EAAE,EAAQ,KAAS,GAChJ,MAAO,GAAI,GAAK,GAAK,OAAO,eAAe,EAAQ,EAAK,GAAI,GAGhE,GAAU,SAAU,EAAY,EAAW,CACvC,MAAO,UAAU,EAAQ,EAAK,CAAE,EAAU,EAAQ,EAAK,KAG3D,GAAa,SAAU,EAAa,EAAe,CAC/C,GAAI,MAAO,UAAY,UAAY,MAAO,SAAQ,UAAa,WAAY,MAAO,SAAQ,SAAS,EAAa,IAGpH,GAAY,SAAU,EAAS,EAAY,EAAG,EAAW,CACrD,WAAe,EAAO,CAAE,MAAO,aAAiB,GAAI,EAAQ,GAAI,GAAE,SAAU,EAAS,CAAE,EAAQ,KAC/F,MAAO,IAAK,IAAM,GAAI,UAAU,SAAU,EAAS,EAAQ,CACvD,WAAmB,EAAO,CAAE,GAAI,CAAE,EAAK,EAAU,KAAK,UAAkB,EAAP,CAAY,EAAO,IACpF,WAAkB,EAAO,CAAE,GAAI,CAAE,EAAK,EAAU,MAAS,UAAkB,EAAP,CAAY,EAAO,IACvF,WAAc,EAAQ,CAAE,EAAO,KAAO,EAAQ,EAAO,OAAS,EAAM,EAAO,OAAO,KAAK,EAAW,GAClG,EAAM,GAAY,EAAU,MAAM,EAAS,GAAc,KAAK,WAItE,GAAc,SAAU,EAAS,EAAM,CACnC,GAAI,GAAI,CAAE,MAAO,EAAG,KAAM,UAAW,CAAE,GAAI,EAAE,GAAK,EAAG,KAAM,GAAE,GAAI,MAAO,GAAE,IAAO,KAAM,GAAI,IAAK,IAAM,EAAG,EAAG,EAAG,EAC/G,MAAO,GAAI,CAAE,KAAM,EAAK,GAAI,MAAS,EAAK,GAAI,OAAU,EAAK,IAAM,MAAO,SAAW,YAAe,GAAE,OAAO,UAAY,UAAW,CAAE,MAAO,QAAU,EACvJ,WAAc,EAAG,CAAE,MAAO,UAAU,EAAG,CAAE,MAAO,GAAK,CAAC,EAAG,KACzD,WAAc,EAAI,CACd,GAAI,EAAG,KAAM,IAAI,WAAU,mCAC3B,KAAO,GAAG,GAAI,CACV,GAAI,EAAI,EAAG,GAAM,GAAI,EAAG,GAAK,EAAI,EAAE,OAAY,EAAG,GAAK,EAAE,OAAc,IAAI,EAAE,SAAc,EAAE,KAAK,GAAI,GAAK,EAAE,OAAS,CAAE,GAAI,EAAE,KAAK,EAAG,EAAG,KAAK,KAAM,MAAO,GAE3J,OADI,EAAI,EAAG,GAAG,GAAK,CAAC,EAAG,GAAK,EAAG,EAAE,QACzB,EAAG,QACF,OAAQ,GAAG,EAAI,EAAI,UACnB,GAAG,SAAE,QAAgB,CAAE,MAAO,EAAG,GAAI,KAAM,QAC3C,GAAG,EAAE,QAAS,EAAI,EAAG,GAAI,EAAK,CAAC,GAAI,aACnC,GAAG,EAAK,EAAE,IAAI,MAAO,EAAE,KAAK,MAAO,iBAEpC,GAAM,EAAI,EAAE,KAAM,IAAI,EAAE,OAAS,GAAK,EAAE,EAAE,OAAS,KAAQ,GAAG,KAAO,GAAK,EAAG,KAAO,GAAI,CAAE,EAAI,EAAG,SACjG,GAAI,EAAG,KAAO,GAAM,EAAC,GAAM,EAAG,GAAK,EAAE,IAAM,EAAG,GAAK,EAAE,IAAM,CAAE,EAAE,MAAQ,EAAG,GAAI,MAC9E,GAAI,EAAG,KAAO,GAAK,EAAE,MAAQ,EAAE,GAAI,C
AAE,EAAE,MAAQ,EAAE,GAAI,EAAI,EAAI,MAC7D,GAAI,GAAK,EAAE,MAAQ,EAAE,GAAI,CAAE,EAAE,MAAQ,EAAE,GAAI,EAAE,IAAI,KAAK,GAAK,MAC3D,AAAI,EAAE,IAAI,EAAE,IAAI,MAChB,EAAE,KAAK,MAAO,SAEtB,EAAK,EAAK,KAAK,EAAS,SACnB,EAAP,CAAY,EAAK,CAAC,EAAG,GAAI,EAAI,SAAK,CAAU,EAAI,EAAI,EACtD,GAAI,EAAG,GAAK,EAAG,KAAM,GAAG,GAAI,MAAO,CAAE,MAAO,EAAG,GAAK,EAAG,GAAK,OAAQ,KAAM,MAIlF,GAAe,SAAS,EAAG,EAAG,CAC1B,OAAS,KAAK,GAAG,AAAI,IAAM,WAAa,CAAC,OAAO,UAAU,eAAe,KAAK,EAAG,IAAI,GAAgB,EAAG,EAAG,IAG/G,GAAkB,OAAO,OAAU,SAAS,EAAG,EAAG,EAAG,EAAI,CACrD,AAAI,IAAO,QAAW,GAAK,GAC3B,OAAO,eAAe,EAAG,EAAI,CAAE,WAAY,GAAM,IAAK,UAAW,CAAE,MAAO,GAAE,OAC1E,SAAS,EAAG,EAAG,EAAG,EAAI,CACxB,AAAI,IAAO,QAAW,GAAK,GAC3B,EAAE,GAAM,EAAE,IAGd,GAAW,SAAU,EAAG,CACpB,GAAI,GAAI,MAAO,SAAW,YAAc,OAAO,SAAU,EAAI,GAAK,EAAE,GAAI,EAAI,EAC5E,GAAI,EAAG,MAAO,GAAE,KAAK,GACrB,GAAI,GAAK,MAAO,GAAE,QAAW,SAAU,MAAO,CAC1C,KAAM,UAAY,CACd,MAAI,IAAK,GAAK,EAAE,QAAQ,GAAI,QACrB,CAAE,MAAO,GAAK,EAAE,KAAM,KAAM,CAAC,KAG5C,KAAM,IAAI,WAAU,EAAI,0BAA4B,oCAGxD,GAAS,SAAU,EAAG,EAAG,CACrB,GAAI,GAAI,MAAO,SAAW,YAAc,EAAE,OAAO,UACjD,GAAI,CAAC,EAAG,MAAO,GACf,GAAI,GAAI,EAAE,KAAK,GAAI,EAAG,EAAK,GAAI,EAC/B,GAAI,CACA,KAAQ,KAAM,QAAU,KAAM,IAAM,CAAE,GAAI,EAAE,QAAQ,MAAM,EAAG,KAAK,EAAE,aAEjE,EAAP,CAAgB,EAAI,CAAE,MAAO,UAC7B,CACI,GAAI,CACA,AAAI,GAAK,CAAC,EAAE,MAAS,GAAI,EAAE,SAAY,EAAE,KAAK,UAElD,CAAU,GAAI,EAAG,KAAM,GAAE,OAE7B,MAAO,IAIX,GAAW,UAAY,CACnB,OAAS,GAAK,GAAI,EAAI,EAAG,EAAI,UAAU,OAAQ,IAC3C,EAAK,EAAG,OAAO,GAAO,UAAU,KACpC,MAAO,IAIX,GAAiB,UAAY,CACzB,OAAS,GAAI,EAAG,EAAI,EAAG,EAAK,UAAU,OAAQ,EAAI,EAAI,IAAK,GAAK,UAAU,GAAG,OAC7E,OAAS,GAAI,MAAM,GAAI,EAAI,EAAG,EAAI,EAAG,EAAI,EAAI,IACzC,OAAS,GAAI,UAAU,GAAI,EAAI,EAAG,EAAK,EAAE,OAAQ,EAAI,EAAI,IAAK,IAC1D,EAAE,GAAK,EAAE,GACjB,MAAO,IAGX,GAAgB,SAAU,EAAI,EAAM,EAAM,CACtC,GAAI,GAAQ,UAAU,SAAW,EAAG,OAAS,GAAI,EAAG,EAAI,EAAK,OAAQ,EAAI,EAAI,EAAG,IAC5E,AAAI,IAAM,CAAE,KAAK,MACR,IAAI,GAAK,MAAM,UAAU,MAAM,KAAK,EAAM,EAAG,IAClD,EAAG,GAAK,EAAK,IAGrB,MAAO,GAAG,OAAO,GAAM,MAAM,UAAU,MAAM,KAAK,KAGtD,GAAU,SAAU,EAAG,CACnB,MAAO,gBAAgB,IAAW,MAAK,EAAI,EAAG,MAAQ,GAAI,IAAQ,IAGtE,GAAmB,SA
AU,EAAS,EAAY,EAAW,CACzD,GAAI,CAAC,OAAO,cAAe,KAAM,IAAI,WAAU,wCAC/C,GAAI,GAAI,EAAU,MAAM,EAAS,GAAc,IAAK,EAAG,EAAI,GAC3D,MAAO,GAAI,GAAI,EAAK,QAAS,EAAK,SAAU,EAAK,UAAW,EAAE,OAAO,eAAiB,UAAY,CAAE,MAAO,OAAS,EACpH,WAAc,EAAG,CAAE,AAAI,EAAE,IAAI,GAAE,GAAK,SAAU,EAAG,CAAE,MAAO,IAAI,SAAQ,SAAU,EAAG,EAAG,CAAE,EAAE,KAAK,CAAC,EAAG,EAAG,EAAG,IAAM,GAAK,EAAO,EAAG,OAC9H,WAAgB,EAAG,EAAG,CAAE,GAAI,CAAE,EAAK,EAAE,GAAG,UAAc,EAAP,CAAY,EAAO,EAAE,GAAG,GAAI,IAC3E,WAAc,EAAG,CAAE,EAAE,gBAAiB,IAAU,QAAQ,QAAQ,EAAE,MAAM,GAAG,KAAK,EAAS,GAAU,EAAO,EAAE,GAAG,GAAI,GACnH,WAAiB,EAAO,CAAE,EAAO,OAAQ,GACzC,WAAgB,EAAO,CAAE,EAAO,QAAS,GACzC,WAAgB,EAAG,EAAG,CAAE,AAAI,EAAE,GAAI,EAAE,QAAS,EAAE,QAAQ,EAAO,EAAE,GAAG,GAAI,EAAE,GAAG,MAGhF,GAAmB,SAAU,EAAG,CAC5B,GAAI,GAAG,EACP,MAAO,GAAI,GAAI,EAAK,QAAS,EAAK,QAAS,SAAU,EAAG,CAAE,KAAM,KAAO,EAAK,UAAW,EAAE,OAAO,UAAY,UAAY,CAAE,MAAO,OAAS,EAC1I,WAAc,EAAG,EAAG,CAAE,EAAE,GAAK,EAAE,GAAK,SAAU,EAAG,CAAE,MAAQ,GAAI,CAAC,GAAK,CAAE,MAAO,GAAQ,EAAE,GAAG,IAAK,KAAM,IAAM,UAAa,EAAI,EAAE,GAAK,GAAO,IAG/I,GAAgB,SAAU,EAAG,CACzB,GAAI,CAAC,OAAO,cAAe,KAAM,IAAI,WAAU,wCAC/C,GAAI,GAAI,EAAE,OAAO,eAAgB,EACjC,MAAO,GAAI,EAAE,KAAK,GAAM,GAAI,MAAO,KAAa,WAAa,GAAS,GAAK,EAAE,OAAO,YAAa,EAAI,GAAI,EAAK,QAAS,EAAK,SAAU,EAAK,UAAW,EAAE,OAAO,eAAiB,UAAY,CAAE,MAAO,OAAS,GAC9M,WAAc,EAAG,CAAE,EAAE,GAAK,EAAE,IAAM,SAAU,EAAG,CAAE,MAAO,IAAI,SAAQ,SAAU,EAAS,EAAQ,CAAE,EAAI,EAAE,GAAG,GAAI,EAAO,EAAS,EAAQ,EAAE,KAAM,EAAE,UAChJ,WAAgB,EAAS,EAAQ,EAAG,EAAG,CAAE,QAAQ,QAAQ,GAAG,KAAK,SAAS,EAAG,CAAE,EAAQ,CAAE,MAAO,EAAG,KAAM,KAAS,KAGtH,GAAuB,SAAU,EAAQ,EAAK,CAC1C,MAAI,QAAO,eAAkB,OAAO,eAAe,EAAQ,MAAO,CAAE,MAAO,IAAiB,EAAO,IAAM,EAClG,GAGX,GAAI,GAAqB,OAAO,OAAU,SAAS,EAAG,EAAG,CACrD,OAAO,eAAe,EAAG,UAAW,CAAE,WAAY,GAAM,MAAO,KAC9D,SAAS,EAAG,EAAG,CAChB,EAAE,QAAa,GAGnB,GAAe,SAAU,EAAK,CAC1B,GAAI,GAAO,EAAI,WAAY,MAAO,GAClC,GAAI,GAAS,GACb,GAAI,GAAO,KAAM,OAAS,KAAK,GAAK,AAAI,IAAM,WAAa,OAAO,UAAU,eAAe,KAAK,EAAK,IAAI,GAAgB,EAAQ,EAAK,GACtI,SAAmB,EAAQ,GACpB,GAGX,GAAkB,SAAU,EAAK,CAC7B,MAAQ,IAAO,EAAI,WAAc,EAAM,CAAE,QAAW,IAGxD,GAAyB,SAAU,EAAU,EAAO,EAAM,EAAG,CACzD,GAAI,IAAS,KAA
O,CAAC,EAAG,KAAM,IAAI,WAAU,iDAC5C,GAAI,MAAO,IAAU,WAAa,IAAa,GAAS,CAAC,EAAI,CAAC,EAAM,IAAI,GAAW,KAAM,IAAI,WAAU,4EACvG,MAAO,KAAS,IAAM,EAAI,IAAS,IAAM,EAAE,KAAK,GAAY,EAAI,EAAE,MAAQ,EAAM,IAAI,IAGxF,GAAyB,SAAU,EAAU,EAAO,EAAO,EAAM,EAAG,CAChE,GAAI,IAAS,IAAK,KAAM,IAAI,WAAU,kCACtC,GAAI,IAAS,KAAO,CAAC,EAAG,KAAM,IAAI,WAAU,iDAC5C,GAAI,MAAO,IAAU,WAAa,IAAa,GAAS,CAAC,EAAI,CAAC,EAAM,IAAI,GAAW,KAAM,IAAI,WAAU,2EACvG,MAAQ,KAAS,IAAM,EAAE,KAAK,EAAU,GAAS,EAAI,EAAE,MAAQ,EAAQ,EAAM,IAAI,EAAU,GAAS,GAGxG,EAAS,YAAa,IACtB,EAAS,WAAY,IACrB,EAAS,SAAU,IACnB,EAAS,aAAc,IACvB,EAAS,UAAW,IACpB,EAAS,aAAc,IACvB,EAAS,YAAa,IACtB,EAAS,cAAe,IACxB,EAAS,eAAgB,IACzB,EAAS,kBAAmB,IAC5B,EAAS,WAAY,IACrB,EAAS,SAAU,IACnB,EAAS,WAAY,IACrB,EAAS,iBAAkB,IAC3B,EAAS,gBAAiB,IAC1B,EAAS,UAAW,IACpB,EAAS,mBAAoB,IAC7B,EAAS,mBAAoB,IAC7B,EAAS,gBAAiB,IAC1B,EAAS,uBAAwB,IACjC,EAAS,eAAgB,IACzB,EAAS,kBAAmB,IAC5B,EAAS,yBAA0B,IACnC,EAAS,yBAA0B,QChTvC;AAAA;AAAA;AAAA;AAAA;AAAA,GAMA,AAAC,UAA0C,EAAM,EAAS,CACzD,AAAG,MAAO,KAAY,UAAY,MAAO,KAAW,SACnD,GAAO,QAAU,IACb,AAAG,MAAO,SAAW,YAAc,OAAO,IAC9C,OAAO,GAAI,GACP,AAAG,MAAO,KAAY,SAC1B,GAAQ,YAAiB,IAEzB,EAAK,YAAiB,MACrB,GAAM,UAAW,CACpB,MAAiB,WAAW,CAClB,GAAI,GAAuB,CAE/B,IACC,SAAS,EAAyB,EAAqB,EAAqB,CAEnF,aAGA,EAAoB,EAAE,EAAqB,CACzC,QAAW,UAAW,CAAE,MAAqB,OAI/C,GAAI,GAAe,EAAoB,KACnC,EAAoC,EAAoB,EAAE,GAE1D,EAAS,EAAoB,KAC7B,EAA8B,EAAoB,EAAE,GAEpD,EAAa,EAAoB,KACjC,EAA8B,EAAoB,EAAE,GAExD,WAAiB,EAAK,CAA6B,MAAI,OAAO,SAAW,YAAc,MAAO,QAAO,UAAa,SAAY,EAAU,SAAiB,EAAK,CAAE,MAAO,OAAO,IAAiB,EAAU,SAAiB,EAAK,CAAE,MAAO,IAAO,MAAO,SAAW,YAAc,EAAI,cAAgB,QAAU,IAAQ,OAAO,UAAY,SAAW,MAAO,IAAiB,EAAQ,GAEnX,WAAyB,EAAU,EAAa,CAAE,GAAI,CAAE,aAAoB,IAAgB,KAAM,IAAI,WAAU,qCAEhH,WAA2B,EAAQ,EAAO,CAAE,OAAS,GAAI,EAAG,EAAI,EAAM,OAAQ,IAAK,CAAE,GAAI,GAAa,EAAM,GAAI,EAAW,WAAa,EAAW,YAAc,GAAO,EAAW,aAAe,GAAU,SAAW,IAAY,GAAW,SAAW,IAAM,OAAO,eAAe,EAAQ,EAAW,IAAK,IAE7S,WAAsB,EAAa,EAAY,EAAa,CAAE,MAAI,IAAY,EAAkB,EAAY,UAAW,GAAiB,GAAa,EAAkB,EAAa,GAAqB,EAQzM,GAAI,GAA+B,UAAY,CAI7C,WAAyB,EAAS,CAChC,EAAgB,KAAM,GAEtB,KAAK,eAAe,GACpB,KAAK,gBAQP,SAAa,EAAiB,CAAC,CAC7B,IAAK,iBA
CL,MAAO,UAA0B,CAC/B,GAAI,GAAU,UAAU,OAAS,GAAK,UAAU,KAAO,OAAY,UAAU,GAAK,GAClF,KAAK,OAAS,EAAQ,OACtB,KAAK,UAAY,EAAQ,UACzB,KAAK,QAAU,EAAQ,QACvB,KAAK,OAAS,EAAQ,OACtB,KAAK,KAAO,EAAQ,KACpB,KAAK,QAAU,EAAQ,QACvB,KAAK,aAAe,KAOrB,CACD,IAAK,gBACL,MAAO,UAAyB,CAC9B,AAAI,KAAK,KACP,KAAK,aACI,KAAK,QACd,KAAK,iBAOR,CACD,IAAK,oBACL,MAAO,UAA6B,CAClC,GAAI,GAAQ,SAAS,gBAAgB,aAAa,SAAW,MAC7D,KAAK,SAAW,SAAS,cAAc,YAEvC,KAAK,SAAS,MAAM,SAAW,OAE/B,KAAK,SAAS,MAAM,OAAS,IAC7B,KAAK,SAAS,MAAM,QAAU,IAC9B,KAAK,SAAS,MAAM,OAAS,IAE7B,KAAK,SAAS,MAAM,SAAW,WAC/B,KAAK,SAAS,MAAM,EAAQ,QAAU,QAAU,UAEhD,GAAI,GAAY,OAAO,aAAe,SAAS,gBAAgB,UAC/D,YAAK,SAAS,MAAM,IAAM,GAAG,OAAO,EAAW,MAC/C,KAAK,SAAS,aAAa,WAAY,IACvC,KAAK,SAAS,MAAQ,KAAK,KACpB,KAAK,WAOb,CACD,IAAK,aACL,MAAO,UAAsB,CAC3B,GAAI,GAAQ,KAER,EAAW,KAAK,oBAEpB,KAAK,oBAAsB,UAAY,CACrC,MAAO,GAAM,cAGf,KAAK,YAAc,KAAK,UAAU,iBAAiB,QAAS,KAAK,sBAAwB,GACzF,KAAK,UAAU,YAAY,GAC3B,KAAK,aAAe,IAAiB,GACrC,KAAK,WACL,KAAK,eAON,CACD,IAAK,aACL,MAAO,UAAsB,CAC3B,AAAI,KAAK,aACP,MAAK,UAAU,oBAAoB,QAAS,KAAK,qBACjD,KAAK,YAAc,KACnB,KAAK,oBAAsB,MAGzB,KAAK,UACP,MAAK,UAAU,YAAY,KAAK,UAChC,KAAK,SAAW,QAOnB,CACD,IAAK,eACL,MAAO,UAAwB,CAC7B,KAAK,aAAe,IAAiB,KAAK,QAC1C,KAAK,aAMN,CACD,IAAK,WACL,MAAO,UAAoB,CACzB,GAAI,GAEJ,GAAI,CACF,EAAY,SAAS,YAAY,KAAK,cAC/B,EAAP,CACA,EAAY,GAGd,KAAK,aAAa,KAOnB,CACD,IAAK,eACL,MAAO,SAAsB,EAAW,CACtC,KAAK,QAAQ,KAAK,EAAY,UAAY,QAAS,CACjD,OAAQ,KAAK,OACb,KAAM,KAAK,aACX,QAAS,KAAK,QACd,eAAgB,KAAK,eAAe,KAAK,UAO5C,CACD,IAAK,iBACL,MAAO,UAA0B,CAC/B,AAAI,KAAK,SACP,KAAK,QAAQ,QAGf,SAAS,cAAc,OACvB,OAAO,eAAe,oBAOvB,CACD,IAAK,UAKL,MAAO,UAAmB,CACxB,KAAK,eAEN,CACD,IAAK,SACL,IAAK,UAAe,CAClB,GAAI,GAAS,UAAU,OAAS,GAAK,UAAU,KAAO,OAAY,UAAU,GAAK,OAGjF,GAFA,KAAK,QAAU,EAEX,KAAK,UAAY,QAAU,KAAK,UAAY,MAC9C,KAAM,IAAI,OAAM,uDAQpB,IAAK,UAAe,CAClB,MAAO,MAAK,UAQb,CACD,IAAK,SACL,IAAK,SAAa,EAAQ,CACxB,GAAI,IAAW,OACb,GAAI,GAAU,EAAQ,KAAY,UAAY,EAAO,WAAa,EAAG,CACnE,GAAI,KAAK,SAAW,QAAU,EAAO,aAAa,YAChD,KAAM,IAAI,OAAM,qFAGlB,GAAI,KAAK,SAAW,OAAU,GAAO,aAAa,aAAe,EAAO,aAAa,aACnF,KAAM,IAAI,OAAM,yGAGlB,KAAK,QAAU,MAEf,MAAM,IAAI,OAAM,gD
AStB,IAAK,UAAe,CAClB,MAAO,MAAK,YAIT,KAGwB,EAAoB,EAErD,WAA0B,EAAK,CAA6B,MAAI,OAAO,SAAW,YAAc,MAAO,QAAO,UAAa,SAAY,EAAmB,SAAiB,EAAK,CAAE,MAAO,OAAO,IAAiB,EAAmB,SAAiB,EAAK,CAAE,MAAO,IAAO,MAAO,SAAW,YAAc,EAAI,cAAgB,QAAU,IAAQ,OAAO,UAAY,SAAW,MAAO,IAAiB,EAAiB,GAEvZ,WAAkC,EAAU,EAAa,CAAE,GAAI,CAAE,aAAoB,IAAgB,KAAM,IAAI,WAAU,qCAEzH,YAAoC,EAAQ,EAAO,CAAE,OAAS,GAAI,EAAG,EAAI,EAAM,OAAQ,IAAK,CAAE,GAAI,GAAa,EAAM,GAAI,EAAW,WAAa,EAAW,YAAc,GAAO,EAAW,aAAe,GAAU,SAAW,IAAY,GAAW,SAAW,IAAM,OAAO,eAAe,EAAQ,EAAW,IAAK,IAEtT,YAA+B,EAAa,EAAY,EAAa,CAAE,MAAI,IAAY,GAA2B,EAAY,UAAW,GAAiB,GAAa,GAA2B,EAAa,GAAqB,EAEpO,YAAmB,EAAU,EAAY,CAAE,GAAI,MAAO,IAAe,YAAc,IAAe,KAAQ,KAAM,IAAI,WAAU,sDAAyD,EAAS,UAAY,OAAO,OAAO,GAAc,EAAW,UAAW,CAAE,YAAa,CAAE,MAAO,EAAU,SAAU,GAAM,aAAc,MAAe,GAAY,GAAgB,EAAU,GAEnX,YAAyB,EAAG,EAAG,CAAE,UAAkB,OAAO,gBAAkB,SAAyB,EAAG,EAAG,CAAE,SAAE,UAAY,EAAU,GAAa,GAAgB,EAAG,GAErK,YAAsB,EAAS,CAAE,GAAI,GAA4B,KAA6B,MAAO,WAAgC,CAAE,GAAI,GAAQ,GAAgB,GAAU,EAAQ,GAAI,EAA2B,CAAE,GAAI,IAAY,GAAgB,MAAM,YAAa,EAAS,QAAQ,UAAU,EAAO,UAAW,QAAqB,GAAS,EAAM,MAAM,KAAM,WAAc,MAAO,IAA2B,KAAM,IAE5Z,YAAoC,EAAM,EAAM,CAAE,MAAI,IAAS,GAAiB,KAAU,UAAY,MAAO,IAAS,YAAsB,EAAe,GAAuB,GAElL,YAAgC,EAAM,CAAE,GAAI,IAAS,OAAU,KAAM,IAAI,gBAAe,6DAAgE,MAAO,GAE/J,aAAqC,CAA0E,GAApE,MAAO,UAAY,aAAe,CAAC,QAAQ,WAA6B,QAAQ,UAAU,KAAM,MAAO,GAAO,GAAI,MAAO,QAAU,WAAY,MAAO,GAAM,GAAI,CAAE,YAAK,UAAU,SAAS,KAAK,QAAQ,UAAU,KAAM,GAAI,UAAY,KAAa,SAAe,EAAP,CAAY,MAAO,IAE1T,YAAyB,EAAG,CAAE,UAAkB,OAAO,eAAiB,OAAO,eAAiB,SAAyB,EAAG,CAAE,MAAO,GAAE,WAAa,OAAO,eAAe,IAAc,GAAgB,GAWxM,YAA2B,EAAQ,EAAS,CAC1C,GAAI,GAAY,kBAAkB,OAAO,GAEzC,GAAI,EAAC,EAAQ,aAAa,GAI1B,MAAO,GAAQ,aAAa,GAQ9B,GAAI,IAAyB,SAAU,EAAU,CAC/C,GAAU,EAAW,GAErB,GAAI,GAAS,GAAa,GAM1B,WAAmB,EAAS,EAAS,CACnC,GAAI,IAEJ,SAAyB,KAAM,GAE/B,GAAQ,EAAO,KAAK,MAEpB,GAAM,eAAe,GAErB,GAAM,YAAY,GAEX,GAST,UAAsB,EAAW,CAAC,CAChC,IAAK,iBACL,MAAO,UAA0B,CAC/B,GAAI,GAAU,UAAU,OAAS,GAAK,UAAU,KAAO,OAAY,UAAU,GAAK,GAClF,KAAK,OAAS,MAAO,GAAQ,QAAW,WAAa,EAAQ,OAAS,KAAK,cAC3E,KAAK,OAAS,MAAO,GAAQ,QAAW,WAAa,EAAQ,OAAS,KAAK,cAC3E,KAAK,KAAO,MAAO,GAAQ,MAAS,WAAa,EAAQ,KAA
O,KAAK,YACrE,KAAK,UAAY,EAAiB,EAAQ,aAAe,SAAW,EAAQ,UAAY,SAAS,OAOlG,CACD,IAAK,cACL,MAAO,SAAqB,EAAS,CACnC,GAAI,IAAS,KAEb,KAAK,SAAW,IAAiB,EAAS,QAAS,SAAU,GAAG,CAC9D,MAAO,IAAO,QAAQ,QAQzB,CACD,IAAK,UACL,MAAO,SAAiB,EAAG,CACzB,GAAI,IAAU,EAAE,gBAAkB,EAAE,cAEpC,AAAI,KAAK,iBACP,MAAK,gBAAkB,MAGzB,KAAK,gBAAkB,GAAI,GAAiB,CAC1C,OAAQ,KAAK,OAAO,IACpB,OAAQ,KAAK,OAAO,IACpB,KAAM,KAAK,KAAK,IAChB,UAAW,KAAK,UAChB,QAAS,GACT,QAAS,SAQZ,CACD,IAAK,gBACL,MAAO,SAAuB,EAAS,CACrC,MAAO,IAAkB,SAAU,KAOpC,CACD,IAAK,gBACL,MAAO,SAAuB,EAAS,CACrC,GAAI,IAAW,GAAkB,SAAU,GAE3C,GAAI,GACF,MAAO,UAAS,cAAc,MASjC,CACD,IAAK,cAML,MAAO,SAAqB,EAAS,CACnC,MAAO,IAAkB,OAAQ,KAMlC,CACD,IAAK,UACL,MAAO,UAAmB,CACxB,KAAK,SAAS,UAEV,KAAK,iBACP,MAAK,gBAAgB,UACrB,KAAK,gBAAkB,SAGzB,CAAC,CACH,IAAK,cACL,MAAO,UAAuB,CAC5B,GAAI,GAAS,UAAU,OAAS,GAAK,UAAU,KAAO,OAAY,UAAU,GAAK,CAAC,OAAQ,OACtF,GAAU,MAAO,IAAW,SAAW,CAAC,GAAU,EAClD,GAAU,CAAC,CAAC,SAAS,sBACzB,UAAQ,QAAQ,SAAU,GAAQ,CAChC,GAAU,IAAW,CAAC,CAAC,SAAS,sBAAsB,MAEjD,OAIJ,GACN,KAE8B,GAAa,IAIxC,IACC,SAAS,EAAQ,CAExB,GAAI,GAAqB,EAKzB,GAAI,MAAO,UAAY,aAAe,CAAC,QAAQ,UAAU,QAAS,CAC9D,GAAI,GAAQ,QAAQ,UAEpB,EAAM,QAAU,EAAM,iBACN,EAAM,oBACN,EAAM,mBACN,EAAM,kBACN,EAAM,sBAU1B,WAAkB,EAAS,EAAU,CACjC,KAAO,GAAW,EAAQ,WAAa,GAAoB,CACvD,GAAI,MAAO,GAAQ,SAAY,YAC3B,EAAQ,QAAQ,GAClB,MAAO,GAET,EAAU,EAAQ,YAI1B,EAAO,QAAU,GAKX,IACC,SAAS,EAAQ,EAA0B,EAAqB,CAEvE,GAAI,GAAU,EAAoB,KAYlC,WAAmB,EAAS,EAAU,EAAM,EAAU,EAAY,CAC9D,GAAI,GAAa,EAAS,MAAM,KAAM,WAEtC,SAAQ,iBAAiB,EAAM,EAAY,GAEpC,CACH,QAAS,UAAW,CAChB,EAAQ,oBAAoB,EAAM,EAAY,KAe1D,WAAkB,EAAU,EAAU,EAAM,EAAU,EAAY,CAE9D,MAAI,OAAO,GAAS,kBAAqB,WAC9B,EAAU,MAAM,KAAM,WAI7B,MAAO,IAAS,WAGT,EAAU,KAAK,KAAM,UAAU,MAAM,KAAM,WAIlD,OAAO,IAAa,UACpB,GAAW,SAAS,iBAAiB,IAIlC,MAAM,UAAU,IAAI,KAAK,EAAU,SAAU,EAAS,CACzD,MAAO,GAAU,EAAS,EAAU,EAAM,EAAU,MAa5D,WAAkB,EAAS,EAAU,EAAM,EAAU,CACjD,MAAO,UAAS,EAAG,CACf,EAAE,eAAiB,EAAQ,EAAE,OAAQ,GAEjC,EAAE,gBACF,EAAS,KAAK,EAAS,IAKnC,EAAO,QAAU,GAKX,IACC,SAAS,EAAyB,EAAS,CAQlD,EAAQ,KAAO,SAAS,EAAO,CAC3B,MAAO,KAAU,QACV,YAAiB,cACjB,EAAM,WAAa,GAS9B,EAAQ,SAAW,SAAS,EAAO,CAC/B,GAAI,GAA
O,OAAO,UAAU,SAAS,KAAK,GAE1C,MAAO,KAAU,QACT,KAAS,qBAAuB,IAAS,4BACzC,UAAY,IACZ,GAAM,SAAW,GAAK,EAAQ,KAAK,EAAM,MASrD,EAAQ,OAAS,SAAS,EAAO,CAC7B,MAAO,OAAO,IAAU,UACjB,YAAiB,SAS5B,EAAQ,GAAK,SAAS,EAAO,CACzB,GAAI,GAAO,OAAO,UAAU,SAAS,KAAK,GAE1C,MAAO,KAAS,sBAMd,IACC,SAAS,EAAQ,EAA0B,EAAqB,CAEvE,GAAI,GAAK,EAAoB,KACzB,EAAW,EAAoB,KAWnC,WAAgB,EAAQ,EAAM,EAAU,CACpC,GAAI,CAAC,GAAU,CAAC,GAAQ,CAAC,EACrB,KAAM,IAAI,OAAM,8BAGpB,GAAI,CAAC,EAAG,OAAO,GACX,KAAM,IAAI,WAAU,oCAGxB,GAAI,CAAC,EAAG,GAAG,GACP,KAAM,IAAI,WAAU,qCAGxB,GAAI,EAAG,KAAK,GACR,MAAO,GAAW,EAAQ,EAAM,GAE/B,GAAI,EAAG,SAAS,GACjB,MAAO,GAAe,EAAQ,EAAM,GAEnC,GAAI,EAAG,OAAO,GACf,MAAO,GAAe,EAAQ,EAAM,GAGpC,KAAM,IAAI,WAAU,6EAa5B,WAAoB,EAAM,EAAM,EAAU,CACtC,SAAK,iBAAiB,EAAM,GAErB,CACH,QAAS,UAAW,CAChB,EAAK,oBAAoB,EAAM,KAc3C,WAAwB,EAAU,EAAM,EAAU,CAC9C,aAAM,UAAU,QAAQ,KAAK,EAAU,SAAS,EAAM,CAClD,EAAK,iBAAiB,EAAM,KAGzB,CACH,QAAS,UAAW,CAChB,MAAM,UAAU,QAAQ,KAAK,EAAU,SAAS,EAAM,CAClD,EAAK,oBAAoB,EAAM,OAe/C,WAAwB,EAAU,EAAM,EAAU,CAC9C,MAAO,GAAS,SAAS,KAAM,EAAU,EAAM,GAGnD,EAAO,QAAU,GAKX,IACC,SAAS,EAAQ,CAExB,WAAgB,EAAS,CACrB,GAAI,GAEJ,GAAI,EAAQ,WAAa,SACrB,EAAQ,QAER,EAAe,EAAQ,cAElB,EAAQ,WAAa,SAAW,EAAQ,WAAa,WAAY,CACtE,GAAI,GAAa,EAAQ,aAAa,YAEtC,AAAK,GACD,EAAQ,aAAa,WAAY,IAGrC,EAAQ,SACR,EAAQ,kBAAkB,EAAG,EAAQ,MAAM,QAEtC,GACD,EAAQ,gBAAgB,YAG5B,EAAe,EAAQ,UAEtB,CACD,AAAI,EAAQ,aAAa,oBACrB,EAAQ,QAGZ,GAAI,GAAY,OAAO,eACnB,EAAQ,SAAS,cAErB,EAAM,mBAAmB,GACzB,EAAU,kBACV,EAAU,SAAS,GAEnB,EAAe,EAAU,WAG7B,MAAO,GAGX,EAAO,QAAU,GAKX,IACC,SAAS,EAAQ,CAExB,YAAc,EAKd,EAAE,UAAY,CACZ,GAAI,SAAU,EAAM,EAAU,EAAK,CACjC,GAAI,GAAI,KAAK,GAAM,MAAK,EAAI,IAE5B,MAAC,GAAE,IAAU,GAAE,GAAQ,KAAK,KAAK,CAC/B,GAAI,EACJ,IAAK,IAGA,MAGT,KAAM,SAAU,EAAM,EAAU,EAAK,CACnC,GAAI,GAAO,KACX,YAAqB,CACnB,EAAK,IAAI,EAAM,GACf,EAAS,MAAM,EAAK,WAGtB,SAAS,EAAI,EACN,KAAK,GAAG,EAAM,EAAU,IAGjC,KAAM,SAAU,EAAM,CACpB,GAAI,GAAO,GAAG,MAAM,KAAK,UAAW,GAChC,EAAW,OAAK,GAAM,MAAK,EAAI,KAAK,IAAS,IAAI,QACjD,EAAI,EACJ,EAAM,EAAO,OAEjB,IAAK,EAAG,EAAI,EAAK,IACf,EAAO,GAAG,GAAG,MAAM,EAAO,GAAG,IAAK,GAGpC,MAAO,OAGT,IAAK,SAAU,EAAM,EAAU,CAC7B,GAAI,GAAI,
KAAK,GAAM,MAAK,EAAI,IACxB,EAAO,EAAE,GACT,EAAa,GAEjB,GAAI,GAAQ,EACV,OAAS,GAAI,EAAG,EAAM,EAAK,OAAQ,EAAI,EAAK,IAC1C,AAAI,EAAK,GAAG,KAAO,GAAY,EAAK,GAAG,GAAG,IAAM,GAC9C,EAAW,KAAK,EAAK,IAQ3B,MAAC,GAAW,OACR,EAAE,GAAQ,EACV,MAAO,GAAE,GAEN,OAIX,EAAO,QAAU,EACjB,EAAO,QAAQ,YAAc,IAQf,EAA2B,GAG/B,WAA6B,EAAU,CAEtC,GAAG,EAAyB,GAC3B,MAAO,GAAyB,GAAU,QAG3C,GAAI,GAAS,EAAyB,GAAY,CAGjD,QAAS,IAIV,SAAoB,GAAU,EAAQ,EAAO,QAAS,GAG/C,EAAO,QAKf,MAAC,WAAW,CAEX,EAAoB,EAAI,SAAS,EAAQ,CACxC,GAAI,GAAS,GAAU,EAAO,WAC7B,UAAW,CAAE,MAAO,GAAO,SAC3B,UAAW,CAAE,MAAO,IACrB,SAAoB,EAAE,EAAQ,CAAE,EAAG,IAC5B,MAKR,UAAW,CAEX,EAAoB,EAAI,SAAS,EAAS,EAAY,CACrD,OAAQ,KAAO,GACd,AAAG,EAAoB,EAAE,EAAY,IAAQ,CAAC,EAAoB,EAAE,EAAS,IAC5E,OAAO,eAAe,EAAS,EAAK,CAAE,WAAY,GAAM,IAAK,EAAW,SAO3E,UAAW,CACX,EAAoB,EAAI,SAAS,EAAK,EAAM,CAAE,MAAO,QAAO,UAAU,eAAe,KAAK,EAAK,OAOzF,EAAoB,QAEpC,YCx7BD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,GAeA,GAAI,IAAkB,UAOtB,GAAO,QAAU,GAUjB,YAAoB,EAAQ,CAC1B,GAAI,GAAM,GAAK,EACX,EAAQ,GAAgB,KAAK,GAEjC,GAAI,CAAC,EACH,MAAO,GAGT,GAAI,GACA,EAAO,GACP,EAAQ,EACR,EAAY,EAEhB,IAAK,EAAQ,EAAM,MAAO,EAAQ,EAAI,OAAQ,IAAS,CACrD,OAAQ,EAAI,WAAW,QAChB,IACH,EAAS,SACT,UACG,IACH,EAAS,QACT,UACG,IACH,EAAS,QACT,UACG,IACH,EAAS,OACT,UACG,IACH,EAAS,OACT,cAEA,SAGJ,AAAI,IAAc,GAChB,IAAQ,EAAI,UAAU,EAAW,IAGnC,EAAY,EAAQ,EACpB,GAAQ,EAGV,MAAO,KAAc,EACjB,EAAO,EAAI,UAAU,EAAW,GAChC,KC5EN,MAAM,UAAU,MAAM,OAAO,eAAe,MAAM,UAAU,OAAO,CAAC,aAAa,GAAG,MAAM,YAAY,CAAC,GAAI,GAAE,MAAM,UAAU,IAAI,EAAE,OAAO,UAAU,IAAI,MAAO,GAAE,MAAM,UAAU,OAAO,KAAK,KAAK,SAAS,EAAE,EAAE,CAAC,MAAO,OAAM,QAAQ,GAAG,EAAE,KAAK,MAAM,EAAE,EAAE,KAAK,EAAE,EAAE,IAAI,EAAE,KAAK,GAAG,GAAG,IAAI,MAAM,UAAU,MAAM,KAAK,OAAO,SAAS,KAAK,MAAM,UAAU,SAAS,OAAO,eAAe,MAAM,UAAU,UAAU,CAAC,aAAa,GAAG,MAAM,SAAS,EAAE,CAAC,MAAO,OAAM,UAAU,IAAI,MAAM,KAAK,WAAW,QAAQ,SAAS,KCuBrf,OAAO,SCvBP,KAAK,OAAQ,MAAK,MAAM,SAAS,EAAE,EAAE,CAAC,MAAO,GAAE,GAAG,GAAG,GAAI,SAAQ,SAAS,EAAE,EAAE,CAAC,GAAI,GAAE,GAAI,gBAAe,EAAE,GAAG,EAAE,GAAG,EAAE,GAAG,EAAE,UAAU,CAAC,MAAM,CAAC,GAAG,AAAI,GAAE,OAAO,IAAI,IAAjB,EAAoB,WAAW,EAAE,WAAW,OAAO,EAAE,OAAO,IAAI,EAAE,YAAY,KAAK,UAAU,C
AAC,MAAO,SAAQ,QAAQ,EAAE,eAAe,KAAK,UAAU,CAAC,MAAO,SAAQ,QAAQ,EAAE,cAAc,KAAK,KAAK,QAAQ,KAAK,UAAU,CAAC,MAAO,SAAQ,QAAQ,GAAI,MAAK,CAAC,EAAE,aAAa,MAAM,EAAE,QAAQ,CAAC,KAAK,UAAU,CAAC,MAAO,IAAG,QAAQ,UAAU,CAAC,MAAO,IAAG,IAAI,SAAS,EAAE,CAAC,MAAO,GAAE,EAAE,gBAAgB,IAAI,SAAS,EAAE,CAAC,MAAO,GAAE,eAAgB,OAAM,OAAQ,KAAK,GAAE,KAAK,EAAE,QAAQ,MAAM,EAAE,IAAI,EAAE,OAAO,UAAU,CAAC,EAAE,wBAAwB,QAAQ,+BAA+B,SAAS,EAAE,EAAE,EAAE,CAAC,EAAE,KAAK,EAAE,EAAE,eAAe,EAAE,KAAK,CAAC,EAAE,IAAI,EAAE,GAAG,EAAE,GAAG,EAAE,GAAG,IAAI,EAAE,IAAI,EAAE,MAAM,EAAE,QAAQ,EAAE,EAAE,gBAAgB,AAAW,EAAE,aAAb,UAAyB,EAAE,QAAQ,EAAE,iBAAiB,EAAE,EAAE,QAAQ,IAAI,EAAE,KAAK,EAAE,MAAM,UDyB14B,OAAO,SEzBP,OAAkB,WACZ,CACF,YACA,YACA,UACA,cACA,WACA,cACA,aACA,eACA,gBACA,mBACA,YACA,SACA,YACA,kBACA,gBACA,WACA,oBACA,oBACA,iBACA,wBACA,gBACA,mBACA,0BACA,2BACA,WCtBE,WAAqB,EAAU,CACnC,MAAO,OAAO,IAAU,WCIpB,YAA8B,EAAgC,CAClE,GAAM,GAAS,SAAC,EAAa,CAC3B,MAAM,KAAK,GACX,EAAS,MAAQ,GAAI,SAAQ,OAGzB,EAAW,EAAW,GAC5B,SAAS,UAAY,OAAO,OAAO,MAAM,WACzC,EAAS,UAAU,YAAc,EAC1B,ECAF,GAAM,IAA+C,GAC1D,SAAC,EAAM,CACL,MAAA,UAA4C,EAA0B,CACpE,EAAO,MACP,KAAK,QAAU,EACR,EAAO,OAAM;EACxB,EAAO,IAAI,SAAC,EAAK,EAAC,CAAK,MAAG,GAAI,EAAC,KAAK,EAAI,aAAc,KAAK;KACnD,GACJ,KAAK,KAAO,sBACZ,KAAK,OAAS,KCtBd,YAAuB,EAA6B,EAAO,CAC/D,GAAI,EAAK,CACP,GAAM,GAAQ,EAAI,QAAQ,GAC1B,GAAK,GAAS,EAAI,OAAO,EAAO,ICSpC,GAAA,IAAA,UAAA,CAyBE,WAAoB,EAA4B,CAA5B,KAAA,gBAAA,EAdb,KAAA,OAAS,GAER,KAAA,WAAmD,KAMnD,KAAA,WAAoD,KAc5D,SAAA,UAAA,YAAA,UAAA,aACM,EAEJ,GAAI,CAAC,KAAK,OAAQ,CAChB,KAAK,OAAS,GAGN,GAAA,GAAe,KAAI,WAC3B,GAAI,EAEF,GADA,KAAK,WAAa,KACd,MAAM,QAAQ,OAChB,OAAqB,GAAA,GAAA,GAAU,EAAA,EAAA,OAAA,CAAA,EAAA,KAAA,EAAA,EAAA,OAAE,CAA5B,GAAM,GAAM,EAAA,MACf,EAAO,OAAO,4GAGhB,GAAW,OAAO,MAId,GAAA,GAAoB,KAAI,gBAChC,GAAI,EAAW,GACb,GAAI,CACF,UACO,EAAP,CACA,EAAS,YAAa,IAAsB,EAAE,OAAS,CAAC,GAIpD,GAAA,GAAe,KAAI,WAC3B,GAAI,EAAY,CACd,KAAK,WAAa,SAClB,OAAuB,GAAA,GAAA,GAAU,EAAA,EAAA,OAAA,CAAA,EAAA,KAAA,EAAA,EAAA,OAAE,CAA9B,GAAM,GAAQ,EAAA,MACjB,GAAI,CACF,GAAa,SACN,EAAP,CACA,EAAS,GAAM,KAAN,EAAU,GACnB,AAAI,YAAe,IACjB,EAAM,EAAA,EAAA,GAAA,EAAO,IAA
M,EAAK,EAAI,SAE5B,EAAO,KAAK,uGAMpB,GAAI,EACF,KAAM,IAAI,IAAoB,KAuBpC,EAAA,UAAA,IAAA,SAAI,EAAuB,OAGzB,GAAI,GAAY,IAAa,KAC3B,GAAI,KAAK,OAGP,GAAa,OACR,CACL,GAAI,YAAoB,GAAc,CAGpC,GAAI,EAAS,QAAU,EAAS,WAAW,MACzC,OAEF,EAAS,WAAW,MAEtB,AAAC,MAAK,WAAa,GAAA,KAAK,cAAU,MAAA,IAAA,OAAA,EAAI,IAAI,KAAK,KAU7C,EAAA,UAAA,WAAR,SAAmB,EAAoB,CAC7B,GAAA,GAAe,KAAI,WAC3B,MAAO,KAAe,GAAW,MAAM,QAAQ,IAAe,EAAW,SAAS,IAU5E,EAAA,UAAA,WAAR,SAAmB,EAAoB,CAC7B,GAAA,GAAe,KAAI,WAC3B,KAAK,WAAa,MAAM,QAAQ,GAAe,GAAW,KAAK,GAAS,GAAc,EAAa,CAAC,EAAY,GAAU,GAOpH,EAAA,UAAA,cAAR,SAAsB,EAAoB,CAChC,GAAA,GAAe,KAAI,WAC3B,AAAI,IAAe,EACjB,KAAK,WAAa,KACT,MAAM,QAAQ,IACvB,GAAU,EAAY,IAkB1B,EAAA,UAAA,OAAA,SAAO,EAAsC,CACnC,GAAA,GAAe,KAAI,WAC3B,GAAc,GAAU,EAAY,GAEhC,YAAoB,IACtB,EAAS,cAAc,OAhLb,EAAA,MAAS,UAAA,CACrB,GAAM,GAAQ,GAAI,GAClB,SAAM,OAAS,GACR,KAgLX,KAEO,GAAM,IAAqB,GAAa,MAEzC,YAAyB,EAAU,CACvC,MACE,aAAiB,KAChB,GAAS,UAAY,IAAS,EAAW,EAAM,SAAW,EAAW,EAAM,MAAQ,EAAW,EAAM,aAIzG,YAAsB,EAAuC,CAC3D,AAAI,EAAW,GACb,IAEA,EAAS,cC9MN,GAAM,IAAuB,CAClC,iBAAkB,KAClB,sBAAuB,KACvB,QAAS,OACT,sCAAuC,GACvC,yBAA0B,ICErB,GAAM,IAAmC,CAG9C,WAAU,UAAA,QAAC,GAAA,GAAA,EAAA,EAAA,EAAA,UAAA,OAAA,IAAA,EAAA,GAAA,UAAA,GACD,GAAA,GAAa,GAAe,SACpC,MAAQ,KAAQ,KAAA,OAAR,EAAU,aAAc,YAAW,MAAA,OAAA,EAAA,GAAA,EAAI,MAEjD,aAAY,SAAC,EAAM,CACT,GAAA,GAAa,GAAe,SACpC,MAAQ,KAAQ,KAAA,OAAR,EAAU,eAAgB,cAAc,IAElD,SAAU,QCbN,YAA+B,EAAQ,CAC3C,GAAgB,WAAW,UAAA,CACjB,GAAA,GAAqB,GAAM,iBACnC,GAAI,EAEF,EAAiB,OAGjB,MAAM,KCnBN,aAAc,ECMb,GAAM,IAAyB,UAAA,CAAM,MAAA,IAAmB,IAAK,OAAW,WAOzE,YAA4B,EAAU,CAC1C,MAAO,IAAmB,IAAK,OAAW,GAQtC,YAA8B,EAAQ,CAC1C,MAAO,IAAmB,IAAK,EAAO,QASlC,YAA6B,EAAuB,EAAY,EAAU,CAC9E,MAAO,CACL,KAAI,EACJ,MAAK,EACL,MAAK,GCnCT,GAAI,IAAuD,KASrD,YAAuB,EAAc,CACzC,GAAI,GAAO,sCAAuC,CAChD,GAAM,GAAS,CAAC,GAKhB,GAJI,GACF,IAAU,CAAE,YAAa,GAAO,MAAO,OAEzC,IACI,EAAQ,CACJ,GAAA,GAAyB,GAAvB,EAAW,EAAA,YAAE,EAAK,EAAA,MAE1B,GADA,GAAU,KACN,EACF,KAAM,QAMV,KAQE,YAAuB,EAAQ,CACnC,AAAI,GAAO,uCAAyC,IAClD,IAAQ,YAAc,GACtB,GAAQ,MAAQ,GCnBpB,GAAA,IAAA,SAAA,EAAA,CAAmC,EAAA,EAAA,GA6BjC,WAAY,EAA6C,CAAzD,GAAA,GACE,EAAA,KAAA
,OAAO,KATC,SAAA,UAAqB,GAU7B,AAAI,EACF,GAAK,YAAc,EAGf,GAAe,IACjB,EAAY,IAAI,IAGlB,EAAK,YAAc,KAvBhB,SAAA,OAAP,SAAiB,EAAwB,EAA2B,EAAqB,CACvF,MAAO,IAAI,IAAe,EAAM,EAAO,IAiCzC,EAAA,UAAA,KAAA,SAAK,EAAS,CACZ,AAAI,KAAK,UACP,GAA0B,GAAiB,GAAQ,MAEnD,KAAK,MAAM,IAWf,EAAA,UAAA,MAAA,SAAM,EAAS,CACb,AAAI,KAAK,UACP,GAA0B,GAAkB,GAAM,MAElD,MAAK,UAAY,GACjB,KAAK,OAAO,KAUhB,EAAA,UAAA,SAAA,UAAA,CACE,AAAI,KAAK,UACP,GAA0B,GAAuB,MAEjD,MAAK,UAAY,GACjB,KAAK,cAIT,EAAA,UAAA,YAAA,UAAA,CACE,AAAK,KAAK,QACR,MAAK,UAAY,GACjB,EAAA,UAAM,YAAW,KAAA,MACjB,KAAK,YAAc,OAIb,EAAA,UAAA,MAAV,SAAgB,EAAQ,CACtB,KAAK,YAAY,KAAK,IAGd,EAAA,UAAA,OAAV,SAAiB,EAAQ,CACvB,GAAI,CACF,KAAK,YAAY,MAAM,WAEvB,KAAK,gBAIC,EAAA,UAAA,UAAV,UAAA,CACE,GAAI,CACF,KAAK,YAAY,mBAEjB,KAAK,gBAGX,GApHmC,IAsHnC,GAAA,IAAA,SAAA,EAAA,CAAuC,EAAA,EAAA,GACrC,WACE,EACA,EACA,EAA8B,CAHhC,GAAA,GAKE,EAAA,KAAA,OAAO,KAEH,EACJ,GAAI,EAAW,GAGb,EAAO,UACE,EAAgB,CAMzB,AAAG,EAA0B,EAAc,KAAlC,EAAoB,EAAc,MAA3B,EAAa,EAAc,SAC3C,GAAI,GACJ,AAAI,GAAQ,GAAO,yBAIjB,GAAU,OAAO,OAAO,GACxB,EAAQ,YAAc,UAAA,CAAM,MAAA,GAAK,gBAEjC,EAAU,EAEZ,EAAO,GAAI,KAAA,OAAJ,EAAM,KAAK,GAClB,EAAQ,GAAK,KAAA,OAAL,EAAO,KAAK,GACpB,EAAW,GAAQ,KAAA,OAAR,EAAU,KAAK,GAK5B,SAAK,YAAc,CACjB,KAAM,EAAO,GAAqB,EAAM,GAAQ,GAChD,MAAO,GAAqB,GAAK,KAAL,EAAS,GAAqB,GAC1D,SAAU,EAAW,GAAqB,EAAU,GAAQ,MAGlE,MAAA,IA3CuC,IAoDvC,YAA8B,EAA8B,EAA6B,CACvF,MAAO,WAAA,QAAC,GAAA,GAAA,EAAA,EAAA,EAAA,UAAA,OAAA,IAAA,EAAA,GAAA,UAAA,GACN,GAAI,CACF,EAAO,MAAA,OAAA,EAAA,GAAA,EAAI,WACJ,EAAP,CACA,AAAI,GAAO,sCACT,GAAa,GAIb,GAAqB,KAW7B,YAA6B,EAAQ,CACnC,KAAM,GAQR,YAAmC,EAA2C,EAA2B,CAC/F,GAAA,GAA0B,GAAM,sBACxC,GAAyB,GAAgB,WAAW,UAAA,CAAM,MAAA,GAAsB,EAAc,KAQzF,GAAM,IAA6D,CACxE,OAAQ,GACR,KAAM,GACN,MAAO,GACP,SAAU,ICzOL,GAAM,IAA+B,UAAA,CAAM,MAAC,OAAO,SAAW,YAAc,OAAO,YAAe,kBCDnG,YAAsB,EAAI,CAC9B,MAAO,GCsEH,aAAc,QAAC,GAAA,GAAA,EAAA,EAAA,EAAA,UAAA,OAAA,IAAA,EAAA,GAAA,UAAA,GACnB,MAAO,IAAc,GAIjB,YAA8B,EAA+B,CACjE,MAAI,GAAI,SAAW,EACV,GAGL,EAAI,SAAW,EACV,EAAI,GAGN,SAAe,EAAQ,CAC5B,MAAO,GAAI,OAAO,SAAC,EAAW,EAAuB,CAAK,MAAA,GAAG,IAAO,ICtExE,GAAA,GAAA,UAAA,CAkBE,WAAY,EAA6E,CAC
vF,AAAI,GACF,MAAK,WAAa,GA8BtB,SAAA,UAAA,KAAA,SAAQ,EAAyB,CAC/B,GAAM,GAAa,GAAI,GACvB,SAAW,OAAS,KACpB,EAAW,SAAW,EACf,GA+IT,EAAA,UAAA,UAAA,SACE,EACA,EACA,EAA8B,CAHhC,GAAA,GAAA,KAKQ,EAAa,GAAa,GAAkB,EAAiB,GAAI,IAAe,EAAgB,EAAO,GAE7G,UAAa,UAAA,CACL,GAAA,GAAuB,EAArB,EAAQ,EAAA,SAAE,EAAM,EAAA,OACxB,EAAW,IACT,EAGI,EAAS,KAAK,EAAY,GAC1B,EAIA,EAAK,WAAW,GAGhB,EAAK,cAAc,MAIpB,GAIC,EAAA,UAAA,cAAV,SAAwB,EAAmB,CACzC,GAAI,CACF,MAAO,MAAK,WAAW,SAChB,EAAP,CAIA,EAAK,MAAM,KA+Df,EAAA,UAAA,QAAA,SAAQ,EAA0B,EAAoC,CAAtE,GAAA,GAAA,KACE,SAAc,GAAe,GAEtB,GAAI,GAAkB,SAAC,EAAS,EAAM,CAC3C,GAAM,GAAa,GAAI,IAAkB,CACvC,KAAM,SAAC,EAAK,CACV,GAAI,CACF,EAAK,SACE,EAAP,CACA,EAAO,GACP,EAAW,gBAGf,MAAO,EACP,SAAU,IAEZ,EAAK,UAAU,MAKT,EAAA,UAAA,WAAV,SAAqB,EAA2B,OAC9C,MAAO,GAAA,KAAK,UAAM,MAAA,IAAA,OAAA,OAAA,EAAE,UAAU,IAQhC,EAAA,UAAC,IAAD,UAAA,CACE,MAAO,OA6FT,EAAA,UAAA,KAAA,UAAA,QAAK,GAAA,GAAA,EAAA,EAAA,EAAA,UAAA,OAAA,IAAA,EAAA,GAAA,UAAA,GACH,MAAO,IAAc,GAAY,OA8BnC,EAAA,UAAA,UAAA,SAAU,EAAoC,CAA9C,GAAA,GAAA,KACE,SAAc,GAAe,GAEtB,GAAI,GAAY,SAAC,EAAS,EAAM,CACrC,GAAI,GACJ,EAAK,UACH,SAAC,EAAI,CAAK,MAAC,GAAQ,GACnB,SAAC,EAAQ,CAAK,MAAA,GAAO,IACrB,UAAA,CAAM,MAAA,GAAQ,QAxab,EAAA,OAAkC,SAAI,EAAwD,CACnG,MAAO,IAAI,GAAc,IA2a7B,KASA,YAAwB,EAA+C,OACrE,MAAO,GAAA,GAAW,KAAX,EAAe,GAAO,WAAO,MAAA,IAAA,OAAA,EAAI,QAG1C,YAAuB,EAAU,CAC/B,MAAO,IAAS,EAAW,EAAM,OAAS,EAAW,EAAM,QAAU,EAAW,EAAM,UAGxF,YAAyB,EAAU,CACjC,MAAQ,IAAS,YAAiB,KAAgB,GAAW,IAAU,GAAe,GCzelF,YAAkB,EAAW,CACjC,MAAO,GAAW,GAAM,KAAA,OAAN,EAAQ,MAOtB,WACJ,EAAqF,CAErF,MAAO,UAAC,EAAqB,CAC3B,GAAI,GAAQ,GACV,MAAO,GAAO,KAAK,SAA+B,EAA2B,CAC3E,GAAI,CACF,MAAO,GAAK,EAAc,YACnB,EAAP,CACA,KAAK,MAAM,MAIjB,KAAM,IAAI,WAAU,2CCvBxB,GAAA,GAAA,SAAA,EAAA,CAA2C,EAAA,EAAA,GAazC,WACE,EACA,EACA,EACA,EACQ,EAAuB,CALjC,GAAA,GAmBE,EAAA,KAAA,KAAM,IAAY,KAdV,SAAA,WAAA,EAeR,EAAK,MAAQ,EACT,SAAuC,EAAQ,CAC7C,GAAI,CACF,EAAO,SACA,EAAP,CACA,EAAY,MAAM,KAGtB,EAAA,UAAM,MACV,EAAK,OAAS,EACV,SAAuC,EAAQ,CAC7C,GAAI,CACF,EAAQ,SACD,EAAP,CAEA,EAAY,MAAM,WAGlB,KAAK,gBAGT,EAAA,UAAM,OACV,EAAK,UAAY,EACb,UAAA,CACE,GAAI,CACF,UACO,EAAP,CAEA,EAAY,MAAM,
WAGlB,KAAK,gBAGT,EAAA,UAAM,YAGZ,SAAA,UAAA,YAAA,UAAA,OACU,EAAW,KAAI,OACvB,EAAA,UAAM,YAAW,KAAA,MAEjB,CAAC,GAAU,IAAA,KAAK,cAAU,MAAA,IAAA,QAAA,EAAA,KAAf,QAEf,GA5E2C,ICQpC,GAAM,IAAiD,CAG5D,SAAA,SAAS,EAAQ,CACf,GAAI,GAAU,sBACV,EAAkD,qBAC9C,EAAa,GAAsB,SAC3C,AAAI,GACF,GAAU,EAAS,sBACnB,EAAS,EAAS,sBAEpB,GAAM,GAAS,EAAQ,SAAC,EAAS,CAI/B,EAAS,OACT,EAAS,KAEX,MAAO,IAAI,IAAa,UAAA,CAAM,MAAA,IAAM,KAAA,OAAN,EAAS,MAEzC,sBAAqB,UAAA,QAAC,GAAA,GAAA,EAAA,EAAA,EAAA,UAAA,OAAA,IAAA,EAAA,GAAA,UAAA,GACZ,GAAA,GAAa,GAAsB,SAC3C,MAAQ,KAAQ,KAAA,OAAR,EAAU,wBAAyB,uBAAsB,MAAA,OAAA,EAAA,GAAA,EAAI,MAEvE,qBAAoB,UAAA,QAAC,GAAA,GAAA,EAAA,EAAA,EAAA,UAAA,OAAA,IAAA,EAAA,GAAA,UAAA,GACX,GAAA,GAAa,GAAsB,SAC3C,MAAQ,KAAQ,KAAA,OAAR,EAAU,uBAAwB,sBAAqB,MAAA,OAAA,EAAA,GAAA,EAAI,MAErE,SAAU,QCrBL,GAAM,IAAuD,GAClE,SAAC,EAAM,CACL,MAAA,WAAoC,CAClC,EAAO,MACP,KAAK,KAAO,0BACZ,KAAK,QAAU,yBCVrB,GAAA,GAAA,SAAA,EAAA,CAAgC,EAAA,EAAA,GAqB9B,YAAA,CAAA,GAAA,GAEE,EAAA,KAAA,OAAO,KAtBT,SAAA,OAAS,GAET,EAAA,UAA2B,GAE3B,EAAA,UAAY,GAEZ,EAAA,SAAW,GAEX,EAAA,YAAmB,OAkBnB,SAAA,UAAA,KAAA,SAAQ,EAAwB,CAC9B,GAAM,GAAU,GAAI,IAAiB,KAAM,MAC3C,SAAQ,SAAW,EACZ,GAIC,EAAA,UAAA,eAAV,UAAA,CACE,GAAI,KAAK,OACP,KAAM,IAAI,KAId,EAAA,UAAA,KAAA,SAAK,EAAQ,CAAb,GAAA,GAAA,KACE,GAAa,UAAA,SAEX,GADA,EAAK,iBACD,CAAC,EAAK,UAAW,CACnB,GAAM,GAAO,EAAK,UAAU,YAC5B,OAAuB,GAAA,GAAA,GAAI,EAAA,EAAA,OAAA,CAAA,EAAA,KAAA,EAAA,EAAA,OAAE,CAAxB,GAAM,GAAQ,EAAA,MACjB,EAAS,KAAK,0GAMtB,EAAA,UAAA,MAAA,SAAM,EAAQ,CAAd,GAAA,GAAA,KACE,GAAa,UAAA,CAEX,GADA,EAAK,iBACD,CAAC,EAAK,UAAW,CACnB,EAAK,SAAW,EAAK,UAAY,GACjC,EAAK,YAAc,EAEnB,OADQ,GAAc,EAAI,UACnB,EAAU,QACf,EAAU,QAAS,MAAM,OAMjC,EAAA,UAAA,SAAA,UAAA,CAAA,GAAA,GAAA,KACE,GAAa,UAAA,CAEX,GADA,EAAK,iBACD,CAAC,EAAK,UAAW,CACnB,EAAK,UAAY,GAEjB,OADQ,GAAc,EAAI,UACnB,EAAU,QACf,EAAU,QAAS,eAM3B,EAAA,UAAA,YAAA,UAAA,CACE,KAAK,UAAY,KAAK,OAAS,GAC/B,KAAK,UAAY,MAGnB,OAAA,eAAI,EAAA,UAAA,WAAQ,KAAZ,UAAA,OACE,MAAO,IAAA,KAAK,aAAS,MAAA,IAAA,OAAA,OAAA,EAAE,QAAS,mCAIxB,EAAA,UAAA,cAAV,SAAwB,EAAyB,CAC/C,YAAK,iBACE,EAAA,UAAM,cAAa,KAAA,KAAC,IAInB,EAAA,UAAA,WAAV,SAAqB,EAAyB,CAC5C,YAAK
,iBACL,KAAK,wBAAwB,GACtB,KAAK,gBAAgB,IAIpB,EAAA,UAAA,gBAAV,SAA0B,EAA2B,CAC7C,GAAA,GAAqC,KAAnC,EAAQ,EAAA,SAAE,EAAS,EAAA,UAAE,EAAS,EAAA,UACtC,MAAO,IAAY,EACf,GACC,GAAU,KAAK,GAAa,GAAI,IAAa,UAAA,CAAM,MAAA,IAAU,EAAW,OAIrE,EAAA,UAAA,wBAAV,SAAkC,EAA2B,CACrD,GAAA,GAAuC,KAArC,EAAQ,EAAA,SAAE,EAAW,EAAA,YAAE,EAAS,EAAA,UACxC,AAAI,EACF,EAAW,MAAM,GACR,GACT,EAAW,YAUf,EAAA,UAAA,aAAA,UAAA,CACE,GAAM,GAAkB,GAAI,GAC5B,SAAW,OAAS,KACb,GA/GF,EAAA,OAAkC,SAAI,EAA0B,EAAqB,CAC1F,MAAO,IAAI,IAAoB,EAAa,IAgHhD,GAlIgC,GAuIhC,GAAA,IAAA,SAAA,EAAA,CAAyC,EAAA,EAAA,GACvC,WAES,EACP,EAAsB,CAHxB,GAAA,GAKE,EAAA,KAAA,OAAO,KAHA,SAAA,YAAA,EAIP,EAAK,OAAS,IAGhB,SAAA,UAAA,KAAA,SAAK,EAAQ,SACX,AAAA,GAAA,GAAA,KAAK,eAAW,MAAA,IAAA,OAAA,OAAA,EAAE,QAAI,MAAA,IAAA,QAAA,EAAA,KAAA,EAAG,IAG3B,EAAA,UAAA,MAAA,SAAM,EAAQ,SACZ,AAAA,GAAA,GAAA,KAAK,eAAW,MAAA,IAAA,OAAA,OAAA,EAAE,SAAK,MAAA,IAAA,QAAA,EAAA,KAAA,EAAG,IAG5B,EAAA,UAAA,SAAA,UAAA,SACE,AAAA,GAAA,GAAA,KAAK,eAAW,MAAA,IAAA,OAAA,OAAA,EAAE,YAAQ,MAAA,IAAA,QAAA,EAAA,KAAA,IAIlB,EAAA,UAAA,WAAV,SAAqB,EAAyB,SAC5C,MAAO,GAAA,GAAA,KAAK,UAAM,MAAA,IAAA,OAAA,OAAA,EAAE,UAAU,MAAW,MAAA,IAAA,OAAA,EAAI,IAEjD,GA1ByC,GCjJlC,GAAM,IAA+C,CAC1D,IAAG,UAAA,CAGD,MAAQ,IAAsB,UAAY,MAAM,OAElD,SAAU,QCwBZ,GAAA,IAAA,SAAA,EAAA,CAAsC,EAAA,EAAA,GAUpC,WACU,EACA,EACA,EAA6D,CAF7D,AAAA,IAAA,QAAA,GAAA,KACA,IAAA,QAAA,GAAA,KACA,IAAA,QAAA,GAAA,IAHV,GAAA,GAKE,EAAA,KAAA,OAAO,KAJC,SAAA,YAAA,EACA,EAAA,YAAA,EACA,EAAA,mBAAA,EAZF,EAAA,QAA0B,GAC1B,EAAA,oBAAsB,GAc5B,EAAK,oBAAsB,IAAgB,IAC3C,EAAK,YAAc,KAAK,IAAI,EAAG,GAC/B,EAAK,YAAc,KAAK,IAAI,EAAG,KAGjC,SAAA,UAAA,KAAA,SAAK,EAAQ,CACL,GAAA,GAA+E,KAA7E,EAAS,EAAA,UAAE,EAAO,EAAA,QAAE,EAAmB,EAAA,oBAAE,EAAkB,EAAA,mBAAE,EAAW,EAAA,YAChF,AAAK,GACH,GAAQ,KAAK,GACb,CAAC,GAAuB,EAAQ,KAAK,EAAmB,MAAQ,IAElE,KAAK,cACL,EAAA,UAAM,KAAI,KAAA,KAAC,IAIH,EAAA,UAAA,WAAV,SAAqB,EAAyB,CAC5C,KAAK,iBACL,KAAK,cAQL,OANM,GAAe,KAAK,gBAAgB,GAEpC,EAAmC,KAAjC,EAAmB,EAAA,oBAAE,EAAO,EAAA,QAG9B,EAAO,EAAQ,QACZ,EAAI,EAAG,EAAI,EAAK,QAAU,CAAC,EAAW,OAAQ,GAAK,EAAsB,EAAI,EACpF,EAAW,KAAK,EAAK,IAGvB,YAAK,wBAAwB,GAEtB,GAGD,EAAA,UAAA,YAA
R,UAAA,CACQ,GAAA,GAAoE,KAAlE,EAAW,EAAA,YAAE,EAAkB,EAAA,mBAAE,EAAO,EAAA,QAAE,EAAmB,EAAA,oBAK/D,EAAsB,GAAsB,EAAI,GAAK,EAK3D,GAJA,EAAc,KAAY,EAAqB,EAAQ,QAAU,EAAQ,OAAO,EAAG,EAAQ,OAAS,GAIhG,CAAC,EAAqB,CAKxB,OAJM,GAAM,EAAmB,MAC3B,EAAO,EAGF,EAAI,EAAG,EAAI,EAAQ,QAAW,EAAQ,IAAiB,EAAK,GAAK,EACxE,EAAO,EAET,GAAQ,EAAQ,OAAO,EAAG,EAAO,KAGvC,GAzEsC,GClBtC,GAAA,IAAA,SAAA,EAAA,CAA+B,EAAA,EAAA,GAC7B,WAAY,EAAsB,EAAmD,OACnF,GAAA,KAAA,OAAO,KAYF,SAAA,UAAA,SAAP,SAAgB,EAAW,EAAiB,CAAjB,MAAA,KAAA,QAAA,GAAA,GAClB,MAEX,GAjB+B,ICJxB,GAAM,IAAqC,CAGhD,YAAW,UAAA,QAAC,GAAA,GAAA,EAAA,EAAA,EAAA,UAAA,OAAA,IAAA,EAAA,GAAA,UAAA,GACF,GAAA,GAAa,GAAgB,SACrC,MAAQ,KAAQ,KAAA,OAAR,EAAU,cAAe,aAAY,MAAA,OAAA,EAAA,GAAA,EAAI,MAEnD,cAAa,SAAC,EAAM,CACV,GAAA,GAAa,GAAgB,SACrC,MAAQ,KAAQ,KAAA,OAAR,EAAU,gBAAiB,eAAe,IAEpD,SAAU,QClBZ,GAAA,IAAA,SAAA,EAAA,CAAoC,EAAA,EAAA,GAOlC,WAAsB,EAAqC,EAAmD,CAA9G,GAAA,GACE,EAAA,KAAA,KAAM,EAAW,IAAK,KADF,SAAA,UAAA,EAAqC,EAAA,KAAA,EAFjD,EAAA,QAAmB,KAMtB,SAAA,UAAA,SAAP,SAAgB,EAAW,EAAiB,CAC1C,GADyB,IAAA,QAAA,GAAA,GACrB,KAAK,OACP,MAAO,MAIT,KAAK,MAAQ,EAEb,GAAM,GAAK,KAAK,GACV,EAAY,KAAK,UAuBvB,MAAI,IAAM,MACR,MAAK,GAAK,KAAK,eAAe,EAAW,EAAI,IAK/C,KAAK,QAAU,GAEf,KAAK,MAAQ,EAEb,KAAK,GAAK,KAAK,IAAM,KAAK,eAAe,EAAW,KAAK,GAAI,GAEtD,MAGC,EAAA,UAAA,eAAV,SAAyB,EAA2B,EAAW,EAAiB,CAAjB,MAAA,KAAA,QAAA,GAAA,GACtD,GAAiB,YAAY,EAAU,MAAM,KAAK,EAAW,MAAO,IAGnE,EAAA,UAAA,eAAV,SAAyB,EAA4B,EAAS,EAAwB,CAEpF,GAF4D,IAAA,QAAA,GAAA,GAExD,GAAS,MAAQ,KAAK,QAAU,GAAS,KAAK,UAAY,GAC5D,MAAO,GAIT,GAAiB,cAAc,IAQ1B,EAAA,UAAA,QAAP,SAAe,EAAU,EAAa,CACpC,GAAI,KAAK,OACP,MAAO,IAAI,OAAM,gCAGnB,KAAK,QAAU,GACf,GAAM,GAAQ,KAAK,SAAS,EAAO,GACnC,GAAI,EACF,MAAO,GACF,AAAI,KAAK,UAAY,IAAS,KAAK,IAAM,MAc9C,MAAK,GAAK,KAAK,eAAe,KAAK,UAAW,KAAK,GAAI,QAIjD,EAAA,UAAA,SAAV,SAAmB,EAAU,EAAc,CACzC,GAAI,GAAmB,GACnB,EACJ,GAAI,CACF,KAAK,KAAK,SACH,EAAP,CACA,EAAU,GAIV,EAAa,GAAQ,GAAI,OAAM,sCAEjC,GAAI,EACF,YAAK,cACE,GAIX,EAAA,UAAA,YAAA,UAAA,CACE,GAAI,CAAC,KAAK,OAAQ,CACV,GAAA,GAAoB,KAAlB,EAAE,EAAA,GAAE,EAAS,EAAA,UACb,EAAY,EAAS,QAE7B,KAAK,KAAO,KAAK,MAAQ,KAAK,UAAY,KAC1C,KAAK,QAAU,G
AEf,GAAU,EAAS,MACf,GAAM,MACR,MAAK,GAAK,KAAK,eAAe,EAAW,EAAI,OAG/C,KAAK,MAAQ,KACb,EAAA,UAAM,YAAW,KAAA,QAGvB,GA3IoC,ICiBpC,GAAA,IAAA,UAAA,CAGE,WAAoB,EAAoC,EAAiC,CAAjC,AAAA,IAAA,QAAA,GAAoB,EAAU,KAAlE,KAAA,oBAAA,EAClB,KAAK,IAAM,EA8BN,SAAA,UAAA,SAAP,SAAmB,EAAqD,EAAmB,EAAS,CAA5B,MAAA,KAAA,QAAA,GAAA,GAC/D,GAAI,MAAK,oBAAuB,KAAM,GAAM,SAAS,EAAO,IAlCvD,EAAA,IAAoB,GAAsB,IAoC1D,KCzDA,GAAA,IAAA,SAAA,EAAA,CAAoC,EAAA,EAAA,GAkBlC,WAAY,EAAgC,EAAiC,CAAjC,AAAA,IAAA,QAAA,GAAoB,GAAU,KAA1E,GAAA,GACE,EAAA,KAAA,KAAM,EAAiB,IAAI,KAlBtB,SAAA,QAAmC,GAOnC,EAAA,QAAmB,GAQnB,EAAA,WAAkB,SAMlB,SAAA,UAAA,MAAP,SAAa,EAAwB,CAC3B,GAAA,GAAY,KAAI,QAExB,GAAI,KAAK,QAAS,CAChB,EAAQ,KAAK,GACb,OAGF,GAAI,GACJ,KAAK,QAAU,GAEf,EACE,IAAK,EAAQ,EAAO,QAAQ,EAAO,MAAO,EAAO,OAC/C,YAEM,EAAS,EAAQ,SAI3B,GAFA,KAAK,QAAU,GAEX,EAAO,CACT,KAAQ,EAAS,EAAQ,SACvB,EAAO,cAET,KAAM,KAGZ,GAhDoC,IC8C7B,GAAM,IAAiB,GAAI,IAAe,IAKpC,GAAQ,GClDrB,GAAA,IAAA,SAAA,EAAA,CAA6C,EAAA,EAAA,GAC3C,WAAsB,EAA8C,EAAmD,CAAvH,GAAA,GACE,EAAA,KAAA,KAAM,EAAW,IAAK,KADF,SAAA,UAAA,EAA8C,EAAA,KAAA,IAI1D,SAAA,UAAA,eAAV,SAAyB,EAAoC,EAAU,EAAiB,CAEtF,MAFqE,KAAA,QAAA,GAAA,GAEjE,IAAU,MAAQ,EAAQ,EACrB,EAAA,UAAM,eAAc,KAAA,KAAC,EAAW,EAAI,GAG7C,GAAU,QAAQ,KAAK,MAIhB,EAAU,YAAe,GAAU,WAAa,GAAuB,sBAAsB,UAAA,CAAM,MAAA,GAAU,MAAM,aAElH,EAAA,UAAA,eAAV,SAAyB,EAAoC,EAAU,EAAiB,CAItF,GAJqE,IAAA,QAAA,GAAA,GAIhE,GAAS,MAAQ,EAAQ,GAAO,GAAS,MAAQ,KAAK,MAAQ,EACjE,MAAO,GAAA,UAAM,eAAc,KAAA,KAAC,EAAW,EAAI,GAK7C,AAAK,EAAU,QAAQ,KAAK,SAAC,EAAM,CAAK,MAAA,GAAO,KAAO,KACpD,IAAuB,qBAAqB,GAC5C,EAAU,WAAa,SAK7B,GAlC6C,ICF7C,GAAA,IAAA,SAAA,EAAA,CAA6C,EAAA,EAAA,GAA7C,YAAA,gDACS,SAAA,UAAA,MAAP,SAAa,EAAyB,CACpC,KAAK,QAAU,GAUf,GAAM,GAAU,KAAK,WACrB,KAAK,WAAa,OAEV,GAAA,GAAY,KAAI,QACpB,EACJ,EAAS,GAAU,EAAQ,QAE3B,EACE,IAAK,EAAQ,EAAO,QAAQ,EAAO,MAAO,EAAO,OAC/C,YAEM,GAAS,EAAQ,KAAO,EAAO,KAAO,GAAW,EAAQ,SAInE,GAFA,KAAK,QAAU,GAEX,EAAO,CACT,KAAQ,GAAS,EAAQ,KAAO,EAAO,KAAO,GAAW,EAAQ,SAC/D,EAAO,cAET,KAAM,KAGZ,GAlC6C,ICgCtC,GAAM,IAA0B,GAAI,IAAwB,ICR5D,GAAM,GAAQ,GAAI,GAAkB,SAAC,EAAU,CAAK,MAAA,GAAW,aCxBhE,YAAsB,EAAU,CACpC,MAAO,IAAS,EAAW,EAAM
,UCAnC,YAAiB,EAAQ,CACvB,MAAO,GAAI,EAAI,OAAS,GAGpB,YAA4B,EAAW,CAC3C,MAAO,GAAW,GAAK,IAAS,EAAK,MAAQ,OAGzC,YAAuB,EAAW,CACtC,MAAO,IAAY,GAAK,IAAS,EAAK,MAAQ,OAG1C,YAAoB,EAAa,EAAoB,CACzD,MAAO,OAAO,IAAK,IAAU,SAAW,EAAK,MAAS,ECjBjD,GAAM,IAAe,SAAI,EAAM,CAAwB,MAAA,IAAK,MAAO,GAAE,QAAW,UAAY,MAAO,IAAM,YCM1G,YAAoB,EAAU,CAClC,MAAO,GAAW,GAAK,KAAA,OAAL,EAAO,MCFrB,YAA8B,EAAU,CAC5C,MAAO,GAAW,EAAM,KCJpB,YAA6B,EAAQ,CACzC,MAAO,QAAO,eAAiB,EAAW,GAAG,KAAA,OAAH,EAAM,OAAO,gBCCnD,YAA2C,EAAU,CAEzD,MAAO,IAAI,WACT,gBACE,KAAU,MAAQ,MAAO,IAAU,SAAW,oBAAsB,IAAI,EAAK,KAAG,4HCRhF,aAA2B,CAC/B,MAAI,OAAO,SAAW,YAAc,CAAC,OAAO,SACnC,aAGF,OAAO,SAGT,GAAM,IAAW,KCJlB,YAAqB,EAAU,CACnC,MAAO,GAAW,GAAK,KAAA,OAAL,EAAQ,KCFtB,YAAuD,EAAqC,mGAC1F,EAAS,EAAe,qEAGF,MAAA,CAAA,EAAA,GAAM,EAAO,sBAA/B,GAAkB,EAAA,OAAhB,EAAK,EAAA,MAAE,EAAI,EAAA,KACf,iBAAA,CAAA,EAAA,UACF,MAAA,CAAA,EAAA,EAAA,2BAEI,WAAN,MAAA,CAAA,EAAA,EAAA,eAAA,SAAA,wCAGF,SAAO,yCAIL,YAAkC,EAAQ,CAG9C,MAAO,GAAW,GAAG,KAAA,OAAH,EAAK,WCPnB,WAAuB,EAAyB,CACpD,GAAI,YAAiB,GACnB,MAAO,GAET,GAAI,GAAS,KAAM,CACjB,GAAI,GAAoB,GACtB,MAAO,IAAsB,GAE/B,GAAI,GAAY,GACd,MAAO,IAAc,GAEvB,GAAI,GAAU,GACZ,MAAO,IAAY,GAErB,GAAI,GAAgB,GAClB,MAAO,IAAkB,GAE3B,GAAI,GAAW,GACb,MAAO,IAAa,GAEtB,GAAI,GAAqB,GACvB,MAAO,IAAuB,GAIlC,KAAM,IAAiC,GAOnC,YAAmC,EAAQ,CAC/C,MAAO,IAAI,GAAW,SAAC,EAAyB,CAC9C,GAAM,GAAM,EAAI,MAChB,GAAI,EAAW,EAAI,WACjB,MAAO,GAAI,UAAU,GAGvB,KAAM,IAAI,WAAU,oEAWlB,YAA2B,EAAmB,CAClD,MAAO,IAAI,GAAW,SAAC,EAAyB,CAU9C,OAAS,GAAI,EAAG,EAAI,EAAM,QAAU,CAAC,EAAW,OAAQ,IACtD,EAAW,KAAK,EAAM,IAExB,EAAW,aAIT,YAAyB,EAAuB,CACpD,MAAO,IAAI,GAAW,SAAC,EAAyB,CAC9C,EACG,KACC,SAAC,EAAK,CACJ,AAAK,EAAW,QACd,GAAW,KAAK,GAChB,EAAW,aAGf,SAAC,EAAQ,CAAK,MAAA,GAAW,MAAM,KAEhC,KAAK,KAAM,MAIZ,YAA0B,EAAqB,CACnD,MAAO,IAAI,GAAW,SAAC,EAAyB,aAC9C,OAAoB,GAAA,GAAA,GAAQ,EAAA,EAAA,OAAA,CAAA,EAAA,KAAA,EAAA,EAAA,OAAE,CAAzB,GAAM,GAAK,EAAA,MAEd,GADA,EAAW,KAAK,GACZ,EAAW,OACb,yGAGJ,EAAW,aAIT,YAA+B,EAA+B,CAClE,MAAO,IAAI,GAAW,SAAC,EAAyB,CAC9C,GAAQ,EAAe,GAAY,MAAM,SAAC,EAAG,CAAK,MAAA,GAAW,MAAM,OAIjE,YAAoC,EAAqC,CAC7E,MAAO,IAAkB,GAAmC,IAG9D,YAA0B,EAAi
C,EAAyB,uIACxD,EAAA,GAAA,iFAIxB,GAJe,EAAK,EAAA,MACpB,EAAW,KAAK,GAGZ,EAAW,OACb,MAAA,CAAA,8RAGJ,SAAW,oBC/GP,YACJ,EACA,EACA,EACA,EACA,EAAc,CADd,AAAA,IAAA,QAAA,GAAA,GACA,IAAA,QAAA,GAAA,IAEA,GAAM,GAAuB,EAAU,SAAS,UAAA,CAC9C,IACA,AAAI,EACF,EAAmB,IAAI,KAAK,SAAS,KAAM,IAE3C,KAAK,eAEN,GAIH,GAFA,EAAmB,IAAI,GAEnB,CAAC,EAKH,MAAO,GCiBL,YAAuB,EAA0B,EAAS,CAAT,MAAA,KAAA,QAAA,GAAA,GAC9C,EAAQ,SAAC,EAAQ,EAAU,CAChC,EAAO,UACL,GAAI,GACF,EACA,SAAC,EAAK,CAAK,MAAA,IAAgB,EAAY,EAAW,UAAA,CAAM,MAAA,GAAW,KAAK,IAAQ,IAChF,UAAA,CAAM,MAAA,IAAgB,EAAY,EAAW,UAAA,CAAM,MAAA,GAAW,YAAY,IAC1E,SAAC,EAAG,CAAK,MAAA,IAAgB,EAAY,EAAW,UAAA,CAAM,MAAA,GAAW,MAAM,IAAM,QCH/E,YAAyB,EAA0B,EAAiB,CAAjB,MAAA,KAAA,QAAA,GAAA,GAChD,EAAQ,SAAC,EAAQ,EAAU,CAChC,EAAW,IAAI,EAAU,SAAS,UAAA,CAAM,MAAA,GAAO,UAAU,IAAa,MC3DpE,YAAgC,EAA6B,EAAwB,CACzF,MAAO,GAAU,GAAO,KAAK,GAAY,GAAY,GAAU,ICD3D,YAA6B,EAAuB,EAAwB,CAChF,MAAO,GAAU,GAAO,KAAK,GAAY,GAAY,GAAU,ICH3D,YAA2B,EAAqB,EAAwB,CAC5E,MAAO,IAAI,GAAc,SAAC,EAAU,CAElC,GAAI,GAAI,EAER,MAAO,GAAU,SAAS,UAAA,CACxB,AAAI,IAAM,EAAM,OAGd,EAAW,WAIX,GAAW,KAAK,EAAM,MAIjB,EAAW,QACd,KAAK,gBCVT,YAA8B,EAAoB,EAAwB,CAC9E,MAAO,IAAI,GAAc,SAAC,EAAU,CAClC,GAAI,GAKJ,UAAgB,EAAY,EAAW,UAAA,CAErC,EAAY,EAAc,MAE1B,GACE,EACA,EACA,UAAA,OACM,EACA,EACJ,GAAI,CAEF,AAAC,EAAkB,EAAS,OAAzB,EAAK,EAAA,MAAE,EAAI,EAAA,WACP,EAAP,CAEA,EAAW,MAAM,GACjB,OAGF,AAAI,EAKF,EAAW,WAGX,EAAW,KAAK,IAGpB,EACA,MAQG,UAAA,CAAM,MAAA,GAAW,GAAQ,KAAA,OAAR,EAAU,SAAW,EAAS,YCrDpD,YAAmC,EAAyB,EAAwB,CACxF,GAAI,CAAC,EACH,KAAM,IAAI,OAAM,2BAElB,MAAO,IAAI,GAAc,SAAC,EAAU,CAClC,GAAgB,EAAY,EAAW,UAAA,CACrC,GAAM,GAAW,EAAM,OAAO,iBAC9B,GACE,EACA,EACA,UAAA,CACE,EAAS,OAAO,KAAK,SAAC,EAAM,CAC1B,AAAI,EAAO,KAGT,EAAW,WAEX,EAAW,KAAK,EAAO,UAI7B,EACA,QCrBF,YAAwC,EAA8B,EAAwB,CAClG,MAAO,IAAsB,GAAmC,GAAQ,GCqBpE,YAAuB,EAA2B,EAAwB,CAC9E,GAAI,GAAS,KAAM,CACjB,GAAI,GAAoB,GACtB,MAAO,IAAmB,EAAO,GAEnC,GAAI,GAAY,GACd,MAAO,IAAc,EAAO,GAE9B,GAAI,GAAU,GACZ,MAAO,IAAgB,EAAO,GAEhC,GAAI,GAAgB,GAClB,MAAO,IAAsB,EAAO,GAEtC,GAAI,GAAW,GACb,MAAO,IAAiB,EAAO,GAEjC,GAAI,GAAqB,GACvB,MAAO,IAA2B,EAAO,GAG7C,KAAM,IAAiC,GCqDnC,
YAAkB,EAA2B,EAAyB,CAC1E,MAAO,GAAY,GAAU,EAAO,GAAa,EAAU,GCvBvD,YAAY,QAAI,GAAA,GAAA,EAAA,EAAA,EAAA,UAAA,OAAA,IAAA,EAAA,GAAA,UAAA,GACpB,GAAM,GAAY,GAAa,GAC/B,MAAO,IAAK,EAAa,GC1ErB,YAAsB,EAAU,CACpC,MAAO,aAAiB,OAAQ,CAAC,MAAM,GCuCnC,WAAoB,EAAyC,EAAa,CAC9E,MAAO,GAAQ,SAAC,EAAQ,EAAU,CAEhC,GAAI,GAAQ,EAGZ,EAAO,UACL,GAAI,GAAmB,EAAY,SAAC,EAAQ,CAG1C,EAAW,KAAK,EAAQ,KAAK,EAAS,EAAO,WCtD7C,GAAA,IAAY,MAAK,QAEzB,YAA2B,EAA6B,EAAW,CAC/D,MAAO,IAAQ,GAAQ,EAAE,MAAA,OAAA,EAAA,GAAA,EAAI,KAAQ,EAAG,GAOtC,YAAiC,EAA2B,CAC9D,MAAO,GAAI,SAAA,EAAI,CAAI,MAAA,IAAY,EAAI,KCd/B,GAAA,IAAY,MAAK,QACjB,GAA0D,OAAM,eAArC,GAA+B,OAAM,UAAlB,GAAY,OAAM,KAQlE,YAA+D,EAAuB,CAC1F,GAAI,EAAK,SAAW,EAAG,CACrB,GAAM,GAAQ,EAAK,GACnB,GAAI,GAAQ,GACV,MAAO,CAAE,KAAM,EAAO,KAAM,MAE9B,GAAI,GAAO,GAAQ,CACjB,GAAM,GAAO,GAAQ,GACrB,MAAO,CACL,KAAM,EAAK,IAAI,SAAC,EAAG,CAAK,MAAA,GAAM,KAC9B,KAAI,IAKV,MAAO,CAAE,KAAM,EAAa,KAAM,MAGpC,YAAgB,EAAQ,CACtB,MAAO,IAAO,MAAO,IAAQ,UAAY,GAAe,KAAS,GC5B7D,YAAuB,EAAgB,EAAa,CACxD,MAAO,GAAK,OAAO,SAAC,EAAQ,EAAK,EAAC,CAAK,MAAE,GAAO,GAAO,EAAO,GAAK,GAAS,ICuMxE,YAAuB,QAAoC,GAAA,GAAA,EAAA,EAAA,EAAA,UAAA,OAAA,IAAA,EAAA,GAAA,UAAA,GAC/D,GAAM,GAAY,GAAa,GACzB,EAAiB,GAAkB,GAEnC,EAA8B,GAAqB,GAA3C,EAAW,EAAA,KAAE,EAAI,EAAA,KAE/B,GAAI,EAAY,SAAW,EAIzB,MAAO,IAAK,GAAI,GAGlB,GAAM,GAAS,GAAI,GACjB,GACE,EACA,EACA,EAEI,SAAC,EAAM,CAAK,MAAA,IAAa,EAAM,IAE/B,KAIR,MAAO,GAAkB,EAAO,KAAK,GAAiB,IAAqC,EAGvF,YACJ,EACA,EACA,EAAiD,CAAjD,MAAA,KAAA,QAAA,GAAA,IAEO,SAAC,EAA2B,CAGjC,GACE,EACA,UAAA,CAaE,OAZQ,GAAW,EAAW,OAExB,EAAS,GAAI,OAAM,GAGrB,EAAS,EAIT,EAAuB,aAGlB,EAAC,CACR,GACE,EACA,UAAA,CACE,GAAM,GAAS,GAAK,EAAY,GAAI,GAChC,EAAgB,GACpB,EAAO,UACL,GAAI,GACF,EACA,SAAC,EAAK,CAEJ,EAAO,GAAK,EACP,GAEH,GAAgB,GAChB,KAEG,GAGH,EAAW,KAAK,EAAe,EAAO,WAG1C,UAAA,CACE,AAAK,EAAE,GAGL,EAAW,eAMrB,IAjCK,EAAI,EAAG,EAAI,EAAQ,MAAnB,IAqCX,IASN,YAAuB,EAAsC,EAAqB,EAA0B,CAC1G,AAAI,EACF,GAAgB,EAAc,EAAW,GAEzC,ICzRE,YACJ,EACA,EACA,EACA,EACA,EACA,EACA,EACA,EAA+B,CAG/B,GAAM,GAAc,GAEhB,EAAS,EAET,EAAQ,EAER,EAAa,GAKX,EAAgB,UAAA,CAIpB,AAAI,GAAc,CAAC,EAAO,QAAU,CAAC,GACnC,EAAW,YAKT,EAAY,SAAC,EA
AQ,CAAK,MAAC,GAAS,EAAa,EAAW,GAAS,EAAO,KAAK,IAEjF,EAAa,SAAC,EAAQ,CAI1B,GAAU,EAAW,KAAK,GAI1B,IAKA,GAAI,GAAgB,GAGpB,EAAU,EAAQ,EAAO,MAAU,UACjC,GAAI,GACF,EACA,SAAC,EAAU,CAGT,GAAY,MAAZ,EAAe,GAEf,AAAI,EAGF,EAAU,GAGV,EAAW,KAAK,IAGpB,UAAA,CAGE,EAAgB,IAGlB,OACA,UAAA,CAIE,GAAI,EAKF,GAAI,CAIF,IAKA,qBACE,GAAM,GAAgB,EAAO,QAI7B,AAAI,EACF,GAAgB,EAAY,EAAmB,UAAA,CAAM,MAAA,GAAW,KAEhE,EAAW,IARR,EAAO,QAAU,EAAS,OAYjC,UACO,EAAP,CACA,EAAW,MAAM,QAS7B,SAAO,UACL,GAAI,GAAmB,EAAY,EAAW,UAAA,CAE5C,EAAa,GACb,OAMG,UAAA,CACL,GAAkB,MAAlB,KChEE,YACJ,EACA,EACA,EAA6B,CAE7B,MAFA,KAAA,QAAA,GAAA,KAEI,EAAW,GAEN,GAAS,SAAC,EAAG,EAAC,CAAK,MAAA,GAAI,SAAC,EAAQ,EAAU,CAAK,MAAA,GAAe,EAAG,EAAG,EAAG,KAAK,EAAU,EAAQ,EAAG,MAAM,GACrG,OAAO,IAAmB,UACnC,GAAa,GAGR,EAAQ,SAAC,EAAQ,EAAU,CAAK,MAAA,IAAe,EAAQ,EAAY,EAAS,MC/B/E,YAAmD,EAA6B,CAA7B,MAAA,KAAA,QAAA,GAAA,KAChD,GAAS,GAAU,GCLtB,aAAmB,CACvB,MAAO,IAAS,GCoDZ,aAAgB,QAAC,GAAA,GAAA,EAAA,EAAA,EAAA,UAAA,OAAA,IAAA,EAAA,GAAA,UAAA,GACrB,MAAO,MAAY,GAAK,EAAM,GAAa,KC7DvC,WAAgD,EAA0B,CAC9E,MAAO,IAAI,GAA+B,SAAC,EAAU,CACnD,EAAU,KAAqB,UAAU,KC9C7C,GAAM,IAA0B,CAAC,cAAe,kBAC1C,GAAqB,CAAC,mBAAoB,uBAC1C,GAAgB,CAAC,KAAM,OA8NvB,WACJ,EACA,EACA,EACA,EAAsC,CAMtC,GAJI,EAAW,IACb,GAAiB,EACjB,EAAU,QAER,EACF,MAAO,GAAa,EAAQ,EAAW,GAAiC,KAAK,GAAiB,IAU1F,GAAA,GAAA,EAEJ,GAAc,GACV,GAAmB,IAAI,SAAC,EAAU,CAAK,MAAA,UAAC,EAAY,CAAK,MAAA,GAAO,GAAY,EAAW,EAAS,MAElG,GAAwB,GACtB,GAAwB,IAAI,GAAwB,EAAQ,IAC5D,GAA0B,GAC1B,GAAc,IAAI,GAAwB,EAAQ,IAClD,GAAE,GATD,EAAG,EAAA,GAAE,EAAM,EAAA,GAgBlB,GAAI,CAAC,GACC,GAAY,GACd,MAAO,IAAS,SAAC,EAAc,CAAK,MAAA,GAAU,EAAW,EAAW,KAClE,EAAU,IAOhB,GAAI,CAAC,EACH,KAAM,IAAI,WAAU,wBAGtB,MAAO,IAAI,GAAc,SAAC,EAAU,CAIlC,GAAM,GAAU,UAAA,QAAC,GAAA,GAAA,EAAA,EAAA,EAAA,UAAA,OAAA,IAAA,EAAA,GAAA,UAAA,GAAmB,MAAA,GAAW,KAAK,EAAI,EAAK,OAAS,EAAO,EAAK,KAElF,SAAI,GAEG,UAAA,CAAM,MAAA,GAAQ,MAWzB,YAAiC,EAAa,EAAiB,CAC7D,MAAO,UAAC,EAAkB,CAAK,MAAA,UAAC,EAAY,CAAK,MAAA,GAAO,GAAY,EAAW,KAQjF,YAAiC,EAAW,CAC1C,MAAO,GAAW,EAAO,cAAgB,EAAW,EAAO,gBAQ7D,YAAmC,EAAW,CAC5C,MAAO,GAAW,EAAO,KAAO,EAAW,EAAO,KAQpD,YAAuB,EAAW,CAChC,MAAO,GAAW,EAAO
,mBAAqB,EAAW,EAAO,qBC9L5D,YACJ,EACA,EACA,EAAsC,CAEtC,MAAI,GACK,GAAoB,EAAY,GAAe,KAAK,GAAiB,IAGvE,GAAI,GAAoB,SAAC,EAAU,CACxC,GAAM,GAAU,UAAA,QAAC,GAAA,GAAA,EAAA,EAAA,EAAA,UAAA,OAAA,IAAA,EAAA,GAAA,UAAA,GAAc,MAAA,GAAW,KAAK,EAAE,SAAW,EAAI,EAAE,GAAK,IACjE,EAAW,EAAW,GAC5B,MAAO,GAAW,GAAiB,UAAA,CAAM,MAAA,GAAc,EAAS,IAAY,SCpB1E,YACJ,EACA,EACA,EAAyC,CAFzC,AAAA,IAAA,QAAA,GAAA,GAEA,IAAA,QAAA,GAAA,IAIA,GAAI,GAAmB,GAEvB,MAAI,IAAuB,MAIzB,CAAI,GAAY,GACd,EAAY,EAIZ,EAAmB,GAIhB,GAAI,GAAW,SAAC,EAAU,CAI/B,GAAI,GAAM,GAAY,GAAW,CAAC,EAAU,EAAW,MAAQ,EAE/D,AAAI,EAAM,GAER,GAAM,GAIR,GAAI,GAAI,EAGR,MAAO,GAAU,SAAS,UAAA,CACxB,AAAK,EAAW,QAEd,GAAW,KAAK,KAEhB,AAAI,GAAK,EAGP,KAAK,SAAS,OAAW,GAGzB,EAAW,aAGd,KC9FD,YAAe,QAAC,GAAA,GAAA,EAAA,EAAA,EAAA,UAAA,OAAA,IAAA,EAAA,GAAA,UAAA,GACpB,GAAM,GAAY,GAAa,GACzB,EAAa,GAAU,EAAM,KAC7B,EAAU,EAChB,MAAO,AAAC,GAAQ,OAGZ,EAAQ,SAAW,EAEnB,EAAU,EAAQ,IAElB,GAAS,GAAY,GAAK,EAAS,IALnC,EC3DC,GAAM,IAAQ,GAAI,GAAkB,ICpCnC,GAAA,IAAY,MAAK,QAMnB,YAA4B,EAAiB,CACjD,MAAO,GAAK,SAAW,GAAK,GAAQ,EAAK,IAAM,EAAK,GAAM,ECqDtD,WAAoB,EAAiD,EAAa,CACtF,MAAO,GAAQ,SAAC,EAAQ,EAAU,CAEhC,GAAI,GAAQ,EAIZ,EAAO,UAIL,GAAI,GAAmB,EAAY,SAAC,EAAK,CAAK,MAAA,GAAU,KAAK,EAAS,EAAO,MAAY,EAAW,KAAK,QCrBzG,aAAa,QAAC,GAAA,GAAA,EAAA,EAAA,EAAA,UAAA,OAAA,IAAA,EAAA,GAAA,UAAA,GAClB,GAAM,GAAiB,GAAkB,GAEnC,EAAU,GAAe,GAE/B,MAAO,GAAQ,OACX,GAAI,GAAsB,SAAC,EAAU,CAGnC,GAAI,GAAuB,EAAQ,IAAI,UAAA,CAAM,MAAA,KAKzC,EAAY,EAAQ,IAAI,UAAA,CAAM,MAAA,KAGlC,EAAW,IAAI,UAAA,CACb,EAAU,EAAY,OAMxB,mBAAS,EAAW,CAClB,EAAU,EAAQ,IAAc,UAC9B,GAAI,GACF,EACA,SAAC,EAAK,CAKJ,GAJA,EAAQ,GAAa,KAAK,GAItB,EAAQ,MAAM,SAAC,EAAM,CAAK,MAAA,GAAO,SAAS,CAC5C,GAAM,GAAc,EAAQ,IAAI,SAAC,EAAM,CAAK,MAAA,GAAO,UAEnD,EAAW,KAAK,EAAiB,EAAc,MAAA,OAAA,EAAA,GAAA,EAAI,KAAU,GAIzD,EAAQ,KAAK,SAAC,EAAQ,EAAC,CAAK,MAAA,CAAC,EAAO,QAAU,EAAU,MAC1D,EAAW,aAIjB,UAAA,CAGE,EAAU,GAAe,GAIzB,CAAC,EAAQ,GAAa,QAAU,EAAW,eA5B1C,EAAc,EAAG,CAAC,EAAW,QAAU,EAAc,EAAQ,OAAQ,MAArE,GAmCT,MAAO,WAAA,CACL,EAAU,EAAY,QAG1B,EC7DA,YAAmB,EAAoD,CAC3E,MAAO,GAAQ,SAAC,EAAQ,EAAU,CAChC,GAAI,GAAW,GACX,EAAsB,KACtB,EAA6C,KAC7C,EAAa,GAEX,EAA
c,UAAA,CAGlB,GAFA,GAAkB,MAAlB,EAAoB,cACpB,EAAqB,KACjB,EAAU,CACZ,EAAW,GACX,GAAM,GAAQ,EACd,EAAY,KACZ,EAAW,KAAK,GAElB,GAAc,EAAW,YAGrB,EAAkB,UAAA,CACtB,EAAqB,KACrB,GAAc,EAAW,YAG3B,EAAO,UACL,GAAI,GACF,EACA,SAAC,EAAK,CACJ,EAAW,GACX,EAAY,EACP,GACH,EAAU,EAAiB,IAAQ,UAChC,EAAqB,GAAI,GAAmB,EAAY,EAAa,KAI5E,UAAA,CACE,EAAa,GACZ,EAAC,GAAY,CAAC,GAAsB,EAAmB,SAAW,EAAW,gBCtClF,YAAuB,EAAkB,EAAyC,CAAzC,MAAA,KAAA,QAAA,GAAA,IACtC,GAAM,UAAA,CAAM,MAAA,IAAM,EAAU,KCG/B,YAAyB,EAAoB,EAAsC,CAAtC,MAAA,KAAA,QAAA,GAAA,MAGjD,EAAmB,GAAgB,KAAhB,EAAoB,EAEhC,EAAQ,SAAC,EAAQ,EAAU,CAChC,GAAI,GAAiB,GACjB,EAAQ,EAEZ,EAAO,UACL,GAAI,GACF,EACA,SAAC,EAAK,aACA,EAAuB,KAK3B,AAAI,IAAU,IAAsB,GAClC,EAAQ,KAAK,QAIf,OAAqB,GAAA,GAAA,GAAO,EAAA,EAAA,OAAA,CAAA,EAAA,KAAA,EAAA,EAAA,OAAE,CAAzB,GAAM,GAAM,EAAA,MACf,EAAO,KAAK,GAMR,GAAc,EAAO,QACvB,GAAS,GAAM,KAAN,EAAU,GACnB,EAAO,KAAK,sGAIhB,GAAI,MAIF,OAAqB,GAAA,GAAA,GAAM,EAAA,EAAA,OAAA,CAAA,EAAA,KAAA,EAAA,EAAA,OAAE,CAAxB,GAAM,GAAM,EAAA,MACf,GAAU,EAAS,GACnB,EAAW,KAAK,uGAItB,UAAA,aAGE,OAAqB,GAAA,GAAA,GAAO,EAAA,EAAA,OAAA,CAAA,EAAA,KAAA,EAAA,EAAA,OAAE,CAAzB,GAAM,GAAM,EAAA,MACf,EAAW,KAAK,qGAElB,EAAW,YAGb,OACA,UAAA,CAEE,EAAU,UCRd,YACJ,EAAgD,CAEhD,MAAO,GAAQ,SAAC,EAAQ,EAAU,CAChC,GAAI,GAAgC,KAChC,EAAY,GACZ,EAEJ,EAAW,EAAO,UAChB,GAAI,GAAmB,EAAY,OAAW,OAAW,SAAC,EAAG,CAC3D,EAAgB,EAAU,EAAS,EAAK,GAAW,GAAU,KAC7D,AAAI,EACF,GAAS,cACT,EAAW,KACX,EAAc,UAAU,IAIxB,EAAY,MAKd,GAMF,GAAS,cACT,EAAW,KACX,EAAe,UAAU,MC5HzB,YACJ,EACA,EACA,EACA,EACA,EAAqC,CAErC,MAAO,UAAC,EAAuB,EAA2B,CAIxD,GAAI,GAAW,EAIX,EAAa,EAEb,EAAQ,EAGZ,EAAO,UACL,GAAI,GACF,EACA,SAAC,EAAK,CAEJ,GAAM,GAAI,IAEV,EAAQ,EAEJ,EAAY,EAAO,EAAO,GAIxB,GAAW,GAAO,GAGxB,GAAc,EAAW,KAAK,IAIhC,GACG,UAAA,CACC,GAAY,EAAW,KAAK,GAC5B,EAAW,eC9BjB,aAAuB,QAAO,GAAA,GAAA,EAAA,EAAA,EAAA,UAAA,OAAA,IAAA,EAAA,GAAA,UAAA,GAClC,GAAM,GAAiB,GAAkB,GACzC,MAAO,GACH,GAAK,GAAa,MAAA,OAAA,EAAA,GAAA,EAAK,KAAuC,GAAiB,IAC/E,EAAQ,SAAC,EAAQ,EAAU,CACzB,GAAiB,EAAA,CAAE,GAAM,EAAK,GAAe,MAAQ,KCYvD,aAA2B,QAC/B,GAAA,GAAA,EAAA,EAAA,EAAA,UAAA,OAAA,IAAA,EAAA,GAAA,UAAA,GAEA,MAAO,IAAa,MAAA,OAAA,EAAA,GAAA,EAAI
,KCgCpB,YACJ,EACA,EAA6G,CAE7G,MAAO,GAAW,GAAkB,GAAS,EAAS,EAAgB,GAAK,GAAS,EAAS,GCnBzF,YAA0B,EAAiB,EAAyC,CAAzC,MAAA,KAAA,QAAA,GAAA,IACxC,EAAQ,SAAC,EAAQ,EAAU,CAChC,GAAI,GAAkC,KAClC,EAAsB,KACtB,EAA0B,KAExB,EAAO,UAAA,CACX,GAAI,EAAY,CAEd,EAAW,cACX,EAAa,KACb,GAAM,GAAQ,EACd,EAAY,KACZ,EAAW,KAAK,KAGpB,YAAqB,CAInB,GAAM,GAAa,EAAY,EACzB,EAAM,EAAU,MACtB,GAAI,EAAM,EAAY,CAEpB,EAAa,KAAK,SAAS,OAAW,EAAa,GACnD,EAAW,IAAI,GACf,OAGF,IAGF,EAAO,UACL,GAAI,GACF,EACA,SAAC,EAAQ,CACP,EAAY,EACZ,EAAW,EAAU,MAGhB,GACH,GAAa,EAAU,SAAS,EAAc,GAC9C,EAAW,IAAI,KAGnB,UAAA,CAGE,IACA,EAAW,YAGb,OACA,UAAA,CAEE,EAAY,EAAa,UC/E7B,YAA+B,EAAe,CAClD,MAAO,GAAQ,SAAC,EAAQ,EAAU,CAChC,GAAI,GAAW,GACf,EAAO,UACL,GAAI,GACF,EACA,SAAC,EAAK,CACJ,EAAW,GACX,EAAW,KAAK,IAElB,UAAA,CACE,AAAK,GACH,EAAW,KAAK,GAElB,EAAW,gBCNf,YAAkB,EAAa,CACnC,MAAO,IAAS,EAEZ,UAAA,CAAM,MAAA,IACN,EAAQ,SAAC,EAAQ,EAAU,CACzB,GAAI,GAAO,EACX,EAAO,UACL,GAAI,GAAmB,EAAY,SAAC,EAAK,CAIvC,AAAI,EAAE,GAAQ,GACZ,GAAW,KAAK,GAIZ,GAAS,GACX,EAAW,iBCxBrB,aAAwB,CAC5B,MAAO,GAAQ,SAAC,EAAQ,EAAU,CAChC,EAAO,UAAU,GAAI,GAAmB,EAAY,OCHlD,YAAmB,EAAQ,CAC/B,MAAO,GAAI,UAAA,CAAM,MAAA,KCkCb,YACJ,EACA,EAAmC,CAEnC,MAAI,GAEK,SAAC,EAAqB,CAC3B,MAAA,IAAO,EAAkB,KAAK,GAAK,GAAI,MAAmB,EAAO,KAAK,GAAU,MAG7E,GAAS,SAAC,EAAO,EAAK,CAAK,MAAA,GAAsB,EAAO,GAAO,KAAK,GAAK,GAAI,GAAM,MCvBtF,YAAmB,EAAoB,EAAyC,CAAzC,AAAA,IAAA,QAAA,GAAA,IAC3C,GAAM,GAAW,GAAM,EAAK,GAC5B,MAAO,IAAU,UAAA,CAAM,MAAA,KC6EnB,WACJ,EACA,EAA0D,CAA1D,MAAA,KAAA,QAAA,GAA+B,IAK/B,EAAa,GAAU,KAAV,EAAc,GAEpB,EAAQ,SAAC,EAAQ,EAAU,CAGhC,GAAI,GAEA,EAAQ,GAEZ,EAAO,UACL,GAAI,GAAmB,EAAY,SAAC,EAAK,CAEvC,GAAM,GAAa,EAAY,GAK/B,AAAI,IAAS,CAAC,EAAY,EAAa,KAMrC,GAAQ,GACR,EAAc,EAGd,EAAW,KAAK,SAO1B,YAAwB,EAAQ,EAAM,CACpC,MAAO,KAAM,EClHT,WAAwD,EAAQ,EAAuC,CAC3G,MAAO,GAAqB,SAAC,EAAM,EAAI,CAAK,MAAA,GAAU,EAAQ,EAAE,GAAM,EAAE,IAAQ,EAAE,KAAS,EAAE,KCJzF,aAAiB,QAAI,GAAA,GAAA,EAAA,EAAA,EAAA,UAAA,OAAA,IAAA,EAAA,GAAA,UAAA,GACzB,MAAO,UAAC,EAAqB,CAAK,MAAA,IAAO,EAAQ,EAAE,MAAA,OAAA,EAAA,GAAA,EAAI,OCFnD,WAAsB,EAAoB,CAC9C,MAAO,GAAQ,SAAC,EAAQ,EAAU,CAGhC,GAAI,CACF,EAAO,UAAU,WAEjB,EAAW,IAAI,MC3B
f,YAAsB,EAAa,CACvC,MAAO,IAAS,EACZ,UAAA,CAAM,MAAA,IACN,EAAQ,SAAC,EAAQ,EAAU,CAKzB,GAAI,GAAc,GAClB,EAAO,UACL,GAAI,GACF,EACA,SAAC,EAAK,CAEJ,EAAO,KAAK,GAGZ,EAAQ,EAAO,QAAU,EAAO,SAElC,UAAA,aAGE,OAAoB,GAAA,GAAA,GAAM,EAAA,EAAA,OAAA,CAAA,EAAA,KAAA,EAAA,EAAA,OAAE,CAAvB,GAAM,GAAK,EAAA,MACd,EAAW,KAAK,qGAElB,EAAW,YAGb,OACA,UAAA,CAEE,EAAS,UCrDjB,aAAe,QAAI,GAAA,GAAA,EAAA,EAAA,EAAA,UAAA,OAAA,IAAA,EAAA,GAAA,UAAA,GACvB,GAAM,GAAY,GAAa,GACzB,EAAa,GAAU,EAAM,KACnC,SAAO,GAAe,GAEf,EAAQ,SAAC,EAAQ,EAAU,CAChC,GAAS,GAAY,GAAI,EAAA,CAAE,GAAM,EAAM,IAAgC,IAAY,UAAU,KCgB3F,aAAmB,QACvB,GAAA,GAAA,EAAA,EAAA,EAAA,UAAA,OAAA,IAAA,EAAA,GAAA,UAAA,GAEA,MAAO,IAAK,MAAA,OAAA,EAAA,GAAA,EAAI,KCoEZ,YAAoB,EAAqC,OACzD,EAAQ,IACR,EAEJ,MAAI,IAAiB,MACnB,CAAI,MAAO,IAAkB,SACxB,GAA4B,EAAa,MAAzC,EAAK,IAAA,OAAG,IAAQ,EAAE,EAAU,EAAa,OAE5C,EAAQ,GAIL,GAAS,EACZ,UAAA,CAAM,MAAA,IACN,EAAQ,SAAC,EAAQ,EAAU,CACzB,GAAI,GAAQ,EACR,EAEE,EAAc,UAAA,CAGlB,GAFA,GAAS,MAAT,EAAW,cACX,EAAY,KACR,GAAS,KAAM,CACjB,GAAM,GAAW,MAAO,IAAU,SAAW,GAAM,GAAS,EAAU,EAAM,IACtE,EAAqB,GAAI,GAAmB,EAAY,UAAA,CAC5D,EAAmB,cACnB,MAEF,EAAS,UAAU,OAEnB,MAIE,EAAoB,UAAA,CACxB,GAAI,GAAY,GAChB,EAAY,EAAO,UACjB,GAAI,GAAmB,EAAY,OAAW,UAAA,CAC5C,AAAI,EAAE,EAAQ,EACZ,AAAI,EACF,IAEA,EAAY,GAGd,EAAW,cAKb,GACF,KAIJ,MC3HF,YAAoB,EAAyB,CACjD,MAAO,GAAQ,SAAC,EAAQ,EAAU,CAChC,GAAI,GAAW,GACX,EAAsB,KAC1B,EAAO,UACL,GAAI,GAAmB,EAAY,SAAC,EAAK,CACvC,EAAW,GACX,EAAY,KAGhB,GAAM,GAAO,UAAA,CACX,GAAI,EAAU,CACZ,EAAW,GACX,GAAM,GAAQ,EACd,EAAY,KACZ,EAAW,KAAK,KAGpB,EAAS,UAAU,GAAI,GAAmB,EAAY,EAAM,OCuB1D,YAAwB,EAA6D,EAAQ,CAMjG,MAAO,GAAQ,GAAc,EAAa,EAAW,UAAU,QAAU,EAAG,KCkDxE,YAAmB,EAA4B,CAA5B,AAAA,IAAA,QAAA,GAAA,IACf,GAAA,GAAgH,EAAO,UAAvH,EAAS,IAAA,OAAG,UAAA,CAAM,MAAA,IAAI,IAAY,EAAE,EAA4E,EAAO,aAAnF,EAAY,IAAA,OAAG,GAAI,EAAE,EAAuD,EAAO,gBAA9D,EAAe,IAAA,OAAG,GAAI,EAAE,EAA+B,EAAO,oBAAtC,EAAmB,IAAA,OAAG,GAAI,EAUnH,MAAO,UAAC,EAAa,CACnB,GAAI,GAAuC,KACvC,EAAuC,KACvC,EAAiC,KACjC,EAAW,EACX,EAAe,GACf,EAAa,GAEX,EAAc,UAAA,CAClB,GAAe,MAAf,EAAiB,cACjB,EAAkB,MAId,EAAQ,UAAA,CACZ,IACA,EAAa,EAAU,KACvB,EAAe,EAAa,IAExB,EAAsB,UAAA,CAG1B,GA
AM,GAAO,EACb,IACA,GAAI,MAAJ,EAAM,eAGR,MAAO,GAAc,SAAC,EAAQ,GAAU,CACtC,IACI,CAAC,GAAc,CAAC,GAClB,IAOF,GAAM,IAAQ,EAAU,GAAO,KAAP,EAAW,IAOnC,GAAW,IAAI,UAAA,CACb,IAKI,IAAa,GAAK,CAAC,GAAc,CAAC,GACpC,GAAkB,GAAY,EAAqB,MAMvD,GAAK,UAAU,IAEV,GAMH,GAAa,GAAI,IAAe,CAC9B,KAAM,SAAC,GAAK,CAAK,MAAA,IAAK,KAAK,KAC3B,MAAO,SAAC,GAAG,CACT,EAAa,GACb,IACA,EAAkB,GAAY,EAAO,EAAc,IACnD,GAAK,MAAM,KAEb,SAAU,UAAA,CACR,EAAe,GACf,IACA,EAAkB,GAAY,EAAO,GACrC,GAAK,cAGT,GAAK,GAAQ,UAAU,MAExB,IAIP,YACE,EACA,EAA+C,QAC/C,GAAA,GAAA,EAAA,EAAA,EAAA,UAAA,OAAA,IAAA,EAAA,EAAA,GAAA,UAAA,GAEA,MAAI,KAAO,GACT,KAEO,MAGL,IAAO,GACF,KAGF,EAAE,MAAA,OAAA,EAAA,GAAA,EAAI,KACV,KAAK,GAAK,IACV,UAAU,UAAA,CAAM,MAAA,OC5Gf,WACJ,EACA,EACA,EAAyB,SAErB,EACA,EAAW,GACf,MAAI,IAAsB,MAAO,IAAuB,SACtD,GAAa,GAAA,EAAmB,cAAU,MAAA,IAAA,OAAA,EAAI,IAC9C,EAAa,GAAA,EAAmB,cAAU,MAAA,IAAA,OAAA,EAAI,IAC9C,EAAW,CAAC,CAAC,EAAmB,SAChC,EAAY,EAAmB,WAE/B,EAAa,GAAkB,KAAlB,EAAsB,IAE9B,GAAS,CACd,UAAW,UAAA,CAAM,MAAA,IAAI,IAAc,EAAY,EAAY,IAC3D,aAAc,GACd,gBAAiB,GACjB,oBAAqB,ICpInB,YAAkB,EAAa,CACnC,MAAO,GAAO,SAAC,EAAG,EAAK,CAAK,MAAA,IAAS,ICYjC,YAAuB,EAAyB,CACpD,MAAO,GAAQ,SAAC,EAAQ,EAAU,CAChC,GAAI,GAAS,GAEP,EAAiB,GAAI,GACzB,EACA,UAAA,CACE,GAAc,MAAd,EAAgB,cAChB,EAAS,IAEX,IAGF,EAAU,GAAU,UAAU,GAE9B,EAAO,UAAU,GAAI,GAAmB,EAAY,SAAC,EAAK,CAAK,MAAA,IAAU,EAAW,KAAK,QCNvF,YAAmB,QAAO,GAAA,GAAA,EAAA,EAAA,EAAA,UAAA,OAAA,IAAA,EAAA,GAAA,UAAA,GAC9B,GAAM,GAAY,GAAa,GAC/B,MAAO,GAAQ,SAAC,EAAQ,EAAU,CAIhC,AAAC,GAAY,GAAO,EAAQ,EAAQ,GAAa,GAAO,EAAQ,IAAS,UAAU,KCqBjF,WACJ,EACA,EAA6G,CAE7G,MAAO,GAAQ,SAAC,EAAQ,EAAU,CAChC,GAAI,GAAyD,KACzD,EAAQ,EAER,EAAa,GAIX,EAAgB,UAAA,CAAM,MAAA,IAAc,CAAC,GAAmB,EAAW,YAEzE,EAAO,UACL,GAAI,GACF,EACA,SAAC,EAAK,CAEJ,GAAe,MAAf,EAAiB,cACjB,GAAI,GAAa,EACX,EAAa,IAEnB,EAAU,EAAQ,EAAO,IAAa,UACnC,EAAkB,GAAI,GACrB,EAIA,SAAC,EAAU,CAAK,MAAA,GAAW,KAAK,EAAiB,EAAe,EAAO,EAAY,EAAY,KAAgB,IAC/G,UAAA,CAIE,EAAkB,KAClB,QAKR,UAAA,CACE,EAAa,GACb,SCrEJ,YACJ,EACA,EAA6G,CAE7G,MAAO,GAAW,GAAkB,EAAU,UAAA,CAAM,MAAA,IAAiB,GAAkB,EAAU,UAAA,CAAM,MAAA,KCjBnG,YAAuB,EAA8B,CACzD,MAAO,GAAQ,SAAC,EAAQ,EAAU,CAChC
,EAAU,GAAU,UAAU,GAAI,GAAmB,EAAY,UAAA,CAAM,MAAA,GAAW,YAAY,KAC9F,CAAC,EAAW,QAAU,EAAO,UAAU,KCMrC,YAAuB,EAAiD,EAAiB,CAAjB,MAAA,KAAA,QAAA,GAAA,IACrE,EAAQ,SAAC,EAAQ,EAAU,CAChC,GAAI,GAAQ,EACZ,EAAO,UACL,GAAI,GAAmB,EAAY,SAAC,EAAK,CACvC,GAAM,GAAS,EAAU,EAAO,KAChC,AAAC,IAAU,IAAc,EAAW,KAAK,GACzC,CAAC,GAAU,EAAW,gBC6CxB,WACJ,EACA,EACA,EAA8B,CAK9B,GAAM,GACJ,EAAW,IAAmB,GAAS,EAElC,CAAE,KAAM,EAA2E,MAAK,EAAE,SAAQ,GACnG,EAEN,MAAO,GACH,EAAQ,SAAC,EAAQ,EAAU,OACzB,AAAA,GAAA,EAAY,aAAS,MAAA,IAAA,QAAA,EAAA,KAArB,GACA,GAAI,GAAU,GACd,EAAO,UACL,GAAI,GACF,EACA,SAAC,EAAK,OACJ,AAAA,GAAA,EAAY,QAAI,MAAA,IAAA,QAAA,EAAA,KAAhB,EAAmB,GACnB,EAAW,KAAK,IAElB,UAAA,OACE,EAAU,GACV,GAAA,EAAY,YAAQ,MAAA,IAAA,QAAA,EAAA,KAApB,GACA,EAAW,YAEb,SAAC,EAAG,OACF,EAAU,GACV,GAAA,EAAY,SAAK,MAAA,IAAA,QAAA,EAAA,KAAjB,EAAoB,GACpB,EAAW,MAAM,IAEnB,UAAA,SACE,AAAI,GACF,IAAA,EAAY,eAAW,MAAA,IAAA,QAAA,EAAA,KAAvB,IAEF,GAAA,EAAY,YAAQ,MAAA,IAAA,QAAA,EAAA,KAApB,QAQR,GC7IC,GAAM,IAAwC,CACnD,QAAS,GACT,SAAU,IAiDN,YACJ,EACA,EAA8C,CAA9C,MAAA,KAAA,QAAA,GAAA,IAEO,EAAQ,SAAC,EAAQ,EAAU,CACxB,GAAA,GAAsB,EAAM,QAAnB,EAAa,EAAM,SAChC,EAAW,GACX,EAAsB,KACtB,EAAiC,KACjC,EAAa,GAEX,EAAgB,UAAA,CACpB,GAAS,MAAT,EAAW,cACX,EAAY,KACR,GACF,KACA,GAAc,EAAW,aAIvB,EAAoB,UAAA,CACxB,EAAY,KACZ,GAAc,EAAW,YAGrB,EAAgB,SAAC,EAAQ,CAC7B,MAAC,GAAY,EAAU,EAAiB,IAAQ,UAAU,GAAI,GAAmB,EAAY,EAAe,KAExG,EAAO,UAAA,CACX,GAAI,EAAU,CAIZ,EAAW,GACX,GAAM,GAAQ,EACd,EAAY,KAEZ,EAAW,KAAK,GAChB,CAAC,GAAc,EAAc,KAIjC,EAAO,UACL,GAAI,GACF,EAMA,SAAC,EAAK,CACJ,EAAW,GACX,EAAY,EACZ,CAAE,IAAa,CAAC,EAAU,SAAY,GAAU,IAAS,EAAc,KAEzE,UAAA,CACE,EAAa,GACb,CAAE,IAAY,GAAY,GAAa,CAAC,EAAU,SAAW,EAAW,gBCtC5E,YACJ,EACA,EACA,EAA8B,CAD9B,AAAA,IAAA,QAAA,GAAA,IACA,IAAA,QAAA,GAAA,IAEA,GAAM,GAAY,GAAM,EAAU,GAClC,MAAO,IAAS,UAAA,CAAM,MAAA,IAAW,GC/B7B,aAAwB,QAAO,GAAA,GAAA,EAAA,EAAA,EAAA,UAAA,OAAA,IAAA,EAAA,GAAA,UAAA,GACnC,GAAM,GAAU,GAAkB,GAElC,MAAO,GAAQ,SAAC,EAAQ,EAAU,CAehC,OAdM,GAAM,EAAO,OACb,EAAc,GAAI,OAAM,GAI1B,EAAW,EAAO,IAAI,UAAA,CAAM,MAAA,KAG5B,EAAQ,cAMH,EAAC,CACR,EAAU,EAAO,IAAI,UACnB,GAAI,GACF,EACA,SAAC,EAAK,CACJ,EAAY,GAAK,EACb,
CAAC,GAAS,CAAC,EAAS,IAEtB,GAAS,GAAK,GAKb,GAAQ,EAAS,MAAM,MAAe,GAAW,QAKtD,MAlBG,EAAI,EAAG,EAAI,EAAK,MAAhB,GAwBT,EAAO,UACL,GAAI,GAAmB,EAAY,SAAC,EAAK,CACvC,GAAI,EAAO,CAET,GAAM,GAAM,EAAA,CAAI,GAAK,EAAK,IAC1B,EAAW,KAAK,EAAU,EAAO,MAAA,OAAA,EAAA,GAAA,EAAI,KAAU,SCnFnD,aAAa,QAAO,GAAA,GAAA,EAAA,EAAA,EAAA,UAAA,OAAA,IAAA,EAAA,GAAA,UAAA,GACxB,MAAO,GAAQ,SAAC,EAAQ,EAAU,CAChC,GAAS,MAAA,OAAA,EAAA,CAAC,GAA8B,EAAM,KAAyC,UAAU,KCG/F,aAAiB,QAAkC,GAAA,GAAA,EAAA,EAAA,EAAA,UAAA,OAAA,IAAA,EAAA,GAAA,UAAA,GACvD,MAAO,IAAG,MAAA,OAAA,EAAA,GAAA,EAAI,KCaT,aAA4C,CACjD,GAAM,GAAY,GAAI,IAAwB,GAC9C,SAAU,SAAU,mBAAoB,CAAE,KAAM,KAC7C,UAAU,IAAM,EAAU,KAAK,WAG3B,ECFF,WACL,EAAkB,EAAmB,SAChC,CACL,MAAO,OAAM,KAAK,EAAK,iBAAoB,IAwBtC,WACL,EAAkB,EAAmB,SAClC,CACH,GAAM,GAAK,GAAsB,EAAU,GAC3C,GAAI,MAAO,IAAO,YAChB,KAAM,IAAI,gBACR,8BAA8B,oBAIlC,MAAO,GAuBF,YACL,EAAkB,EAAmB,SACtB,CACf,MAAO,GAAK,cAAiB,IAAa,OAQrC,aAAqD,CAC1D,MAAO,UAAS,wBAAyB,cACrC,SAAS,eAAiB,OChEzB,YACL,EACqB,CACrB,MAAO,GACL,EAAU,SAAS,KAAM,WACzB,EAAU,SAAS,KAAM,aAExB,KACC,GAAa,GACb,EAAI,IAAM,CACR,GAAM,GAAS,KACf,MAAO,OAAO,IAAW,YACrB,EAAG,SAAS,GACZ,KAEN,EAAU,IAAO,MACjB,KCdC,YACL,EACe,CACf,MAAO,CACL,EAAG,EAAG,WACN,EAAG,EAAG,WAaH,YACL,EAC2B,CAC3B,MAAO,GACL,EAAU,OAAQ,QAClB,EAAU,OAAQ,WAEjB,KACC,GAAU,EAAG,IACb,EAAI,IAAM,GAAiB,IAC3B,EAAU,GAAiB,KCtC1B,YACL,EACe,CACf,MAAO,CACL,EAAG,EAAG,WACN,EAAG,EAAG,WAaH,YACL,EAC2B,CAC3B,MAAO,GACL,EAAU,EAAI,UACd,EAAU,OAAQ,WAEjB,KACC,GAAU,EAAG,IACb,EAAI,IAAM,GAAwB,IAClC,EAAU,GAAwB,KClExC,GAAI,IAAW,UAAY,CACvB,GAAI,MAAO,MAAQ,YACf,MAAO,KASX,WAAkB,EAAK,EAAK,CACxB,GAAI,GAAS,GACb,SAAI,KAAK,SAAU,EAAO,EAAO,CAC7B,MAAI,GAAM,KAAO,EACb,GAAS,EACF,IAEJ,KAEJ,EAEX,MAAsB,WAAY,CAC9B,YAAmB,CACf,KAAK,YAAc,GAEvB,cAAO,eAAe,EAAQ,UAAW,OAAQ,CAI7C,IAAK,UAAY,CACb,MAAO,MAAK,YAAY,QAE5B,WAAY,GACZ,aAAc,KAMlB,EAAQ,UAAU,IAAM,SAAU,EAAK,CACnC,GAAI,GAAQ,EAAS,KAAK,YAAa,GACnC,EAAQ,KAAK,YAAY,GAC7B,MAAO,IAAS,EAAM,IAO1B,EAAQ,UAAU,IAAM,SAAU,EAAK,EAAO,CAC1C,GAAI,GAAQ,EAAS,KAAK,YAAa,GACvC,AAAI,CAAC,EACD,KAAK,YAAY,GAAO,GAAK,EAG7B,KAAK,YAAY,KAAK,CAAC,EAAK,KAOpC,EAAQ,UAAU,OAAS,SAAU,
EAAK,CACtC,GAAI,GAAU,KAAK,YACf,EAAQ,EAAS,EAAS,GAC9B,AAAI,CAAC,GACD,EAAQ,OAAO,EAAO,IAO9B,EAAQ,UAAU,IAAM,SAAU,EAAK,CACnC,MAAO,CAAC,CAAC,CAAC,EAAS,KAAK,YAAa,IAKzC,EAAQ,UAAU,MAAQ,UAAY,CAClC,KAAK,YAAY,OAAO,IAO5B,EAAQ,UAAU,QAAU,SAAU,EAAU,EAAK,CACjD,AAAI,IAAQ,QAAU,GAAM,MAC5B,OAAS,GAAK,EAAG,EAAK,KAAK,YAAa,EAAK,EAAG,OAAQ,IAAM,CAC1D,GAAI,GAAQ,EAAG,GACf,EAAS,KAAK,EAAK,EAAM,GAAI,EAAM,MAGpC,QAOX,GAAY,MAAO,SAAW,aAAe,MAAO,WAAa,aAAe,OAAO,WAAa,SAGpG,GAAY,UAAY,CACxB,MAAI,OAAO,SAAW,aAAe,OAAO,OAAS,KAC1C,OAEP,MAAO,OAAS,aAAe,KAAK,OAAS,KACtC,KAEP,MAAO,SAAW,aAAe,OAAO,OAAS,KAC1C,OAGJ,SAAS,oBAShB,GAA2B,UAAY,CACvC,MAAI,OAAO,wBAA0B,WAI1B,sBAAsB,KAAK,IAE/B,SAAU,EAAU,CAAE,MAAO,YAAW,UAAY,CAAE,MAAO,GAAS,KAAK,QAAW,IAAO,QAIpG,GAAkB,EAStB,YAAmB,EAAU,EAAO,CAChC,GAAI,GAAc,GAAO,EAAe,GAAO,EAAe,EAO9D,YAA0B,CACtB,AAAI,GACA,GAAc,GACd,KAEA,GACA,IAUR,YAA2B,CACvB,GAAwB,GAO5B,YAAiB,CACb,GAAI,GAAY,KAAK,MACrB,GAAI,EAAa,CAEb,GAAI,EAAY,EAAe,GAC3B,OAMJ,EAAe,OAGf,GAAc,GACd,EAAe,GACf,WAAW,EAAiB,GAEhC,EAAe,EAEnB,MAAO,GAIX,GAAI,IAAgB,GAGhB,GAAiB,CAAC,MAAO,QAAS,SAAU,OAAQ,QAAS,SAAU,OAAQ,UAE/E,GAA4B,MAAO,mBAAqB,YAIxD,GAA0C,UAAY,CAMtD,YAAoC,CAMhC,KAAK,WAAa,GAMlB,KAAK,qBAAuB,GAM5B,KAAK,mBAAqB,KAM1B,KAAK,WAAa,GAClB,KAAK,iBAAmB,KAAK,iBAAiB,KAAK,MACnD,KAAK,QAAU,GAAS,KAAK,QAAQ,KAAK,MAAO,IAQrD,SAAyB,UAAU,YAAc,SAAU,EAAU,CACjE,AAAK,CAAC,KAAK,WAAW,QAAQ,IAC1B,KAAK,WAAW,KAAK,GAGpB,KAAK,YACN,KAAK,YASb,EAAyB,UAAU,eAAiB,SAAU,EAAU,CACpE,GAAI,GAAY,KAAK,WACjB,EAAQ,EAAU,QAAQ,GAE9B,AAAI,CAAC,GACD,EAAU,OAAO,EAAO,GAGxB,CAAC,EAAU,QAAU,KAAK,YAC1B,KAAK,eASb,EAAyB,UAAU,QAAU,UAAY,CACrD,GAAI,GAAkB,KAAK,mBAG3B,AAAI,GACA,KAAK,WAWb,EAAyB,UAAU,iBAAmB,UAAY,CAE9D,GAAI,GAAkB,KAAK,WAAW,OAAO,SAAU,EAAU,CAC7D,MAAO,GAAS,eAAgB,EAAS,cAO7C,SAAgB,QAAQ,SAAU,EAAU,CAAE,MAAO,GAAS,oBACvD,EAAgB,OAAS,GAQpC,EAAyB,UAAU,SAAW,UAAY,CAGtD,AAAI,CAAC,IAAa,KAAK,YAMvB,UAAS,iBAAiB,gBAAiB,KAAK,kBAChD,OAAO,iBAAiB,SAAU,KAAK,SACvC,AAAI,GACA,MAAK,mBAAqB,GAAI,kBAAiB,KAAK,SACpD,KAAK,mBAAmB,QAAQ,SAAU,CACtC,WAAY,GACZ,UAAW,GACX,cAAe,GACf,QAAS,MAIb,UAAS,iBAAiB,qBAAsB,KAAK,SACrD,KAAK,qBAAuB,IAEh
C,KAAK,WAAa,KAQtB,EAAyB,UAAU,YAAc,UAAY,CAGzD,AAAI,CAAC,IAAa,CAAC,KAAK,YAGxB,UAAS,oBAAoB,gBAAiB,KAAK,kBACnD,OAAO,oBAAoB,SAAU,KAAK,SACtC,KAAK,oBACL,KAAK,mBAAmB,aAExB,KAAK,sBACL,SAAS,oBAAoB,qBAAsB,KAAK,SAE5D,KAAK,mBAAqB,KAC1B,KAAK,qBAAuB,GAC5B,KAAK,WAAa,KAStB,EAAyB,UAAU,iBAAmB,SAAU,EAAI,CAChE,GAAI,GAAK,EAAG,aAAc,EAAe,IAAO,OAAS,GAAK,EAE1D,EAAmB,GAAe,KAAK,SAAU,EAAK,CACtD,MAAO,CAAC,CAAC,CAAC,EAAa,QAAQ,KAEnC,AAAI,GACA,KAAK,WAQb,EAAyB,YAAc,UAAY,CAC/C,MAAK,MAAK,WACN,MAAK,UAAY,GAAI,IAElB,KAAK,WAOhB,EAAyB,UAAY,KAC9B,KAUP,GAAsB,SAAU,EAAQ,EAAO,CAC/C,OAAS,GAAK,EAAG,EAAK,OAAO,KAAK,GAAQ,EAAK,EAAG,OAAQ,IAAM,CAC5D,GAAI,GAAM,EAAG,GACb,OAAO,eAAe,EAAQ,EAAK,CAC/B,MAAO,EAAM,GACb,WAAY,GACZ,SAAU,GACV,aAAc,KAGtB,MAAO,IASP,GAAe,SAAU,EAAQ,CAIjC,GAAI,GAAc,GAAU,EAAO,eAAiB,EAAO,cAAc,YAGzE,MAAO,IAAe,IAItB,GAAY,GAAe,EAAG,EAAG,EAAG,GAOxC,YAAiB,EAAO,CACpB,MAAO,YAAW,IAAU,EAShC,YAAwB,EAAQ,CAE5B,OADI,GAAY,GACP,EAAK,EAAG,EAAK,UAAU,OAAQ,IACpC,EAAU,EAAK,GAAK,UAAU,GAElC,MAAO,GAAU,OAAO,SAAU,EAAM,EAAU,CAC9C,GAAI,GAAQ,EAAO,UAAY,EAAW,UAC1C,MAAO,GAAO,GAAQ,IACvB,GAQP,YAAqB,EAAQ,CAGzB,OAFI,GAAY,CAAC,MAAO,QAAS,SAAU,QACvC,EAAW,GACN,EAAK,EAAG,EAAc,EAAW,EAAK,EAAY,OAAQ,IAAM,CACrE,GAAI,GAAW,EAAY,GACvB,EAAQ,EAAO,WAAa,GAChC,EAAS,GAAY,GAAQ,GAEjC,MAAO,GASX,YAA2B,EAAQ,CAC/B,GAAI,GAAO,EAAO,UAClB,MAAO,IAAe,EAAG,EAAG,EAAK,MAAO,EAAK,QAQjD,YAAmC,EAAQ,CAGvC,GAAI,GAAc,EAAO,YAAa,EAAe,EAAO,aAS5D,GAAI,CAAC,GAAe,CAAC,EACjB,MAAO,IAEX,GAAI,GAAS,GAAY,GAAQ,iBAAiB,GAC9C,EAAW,GAAY,GACvB,EAAW,EAAS,KAAO,EAAS,MACpC,EAAU,EAAS,IAAM,EAAS,OAKlC,EAAQ,GAAQ,EAAO,OAAQ,EAAS,GAAQ,EAAO,QAqB3D,GAlBI,EAAO,YAAc,cAOjB,MAAK,MAAM,EAAQ,KAAc,GACjC,IAAS,GAAe,EAAQ,OAAQ,SAAW,GAEnD,KAAK,MAAM,EAAS,KAAa,GACjC,IAAU,GAAe,EAAQ,MAAO,UAAY,IAOxD,CAAC,GAAkB,GAAS,CAK5B,GAAI,GAAgB,KAAK,MAAM,EAAQ,GAAY,EAC/C,EAAiB,KAAK,MAAM,EAAS,GAAW,EAMpD,AAAI,KAAK,IAAI,KAAmB,GAC5B,IAAS,GAET,KAAK,IAAI,KAAoB,GAC7B,IAAU,GAGlB,MAAO,IAAe,EAAS,KAAM,EAAS,IAAK,EAAO,GAQ9D,GAAI,IAAwB,UAAY,CAGpC,MAAI,OAAO,qBAAuB,YACvB,SAAU,EAAQ,CAAE,MAAO,aAAkB,IAAY,GAAQ,oBAKrE,SAAU,EAAQ,CAAE,MAAQ,aAAkB,IAAY,GAAQ,YACrE,MAA
O,GAAO,SAAY,eAQlC,YAA2B,EAAQ,CAC/B,MAAO,KAAW,GAAY,GAAQ,SAAS,gBAQnD,YAAwB,EAAQ,CAC5B,MAAK,IAGD,GAAqB,GACd,GAAkB,GAEtB,GAA0B,GALtB,GAcf,YAA4B,EAAI,CAC5B,GAAI,GAAI,EAAG,EAAG,EAAI,EAAG,EAAG,EAAQ,EAAG,MAAO,EAAS,EAAG,OAElD,EAAS,MAAO,kBAAoB,YAAc,gBAAkB,OACpE,EAAO,OAAO,OAAO,EAAO,WAEhC,UAAmB,EAAM,CACrB,EAAG,EAAG,EAAG,EAAG,MAAO,EAAO,OAAQ,EAClC,IAAK,EACL,MAAO,EAAI,EACX,OAAQ,EAAS,EACjB,KAAM,IAEH,EAYX,YAAwB,EAAG,EAAG,EAAO,EAAQ,CACzC,MAAO,CAAE,EAAG,EAAG,EAAG,EAAG,MAAO,EAAO,OAAQ,GAO/C,GAAI,IAAmC,UAAY,CAM/C,WAA2B,EAAQ,CAM/B,KAAK,eAAiB,EAMtB,KAAK,gBAAkB,EAMvB,KAAK,aAAe,GAAe,EAAG,EAAG,EAAG,GAC5C,KAAK,OAAS,EAQlB,SAAkB,UAAU,SAAW,UAAY,CAC/C,GAAI,GAAO,GAAe,KAAK,QAC/B,YAAK,aAAe,EACZ,EAAK,QAAU,KAAK,gBACxB,EAAK,SAAW,KAAK,iBAQ7B,EAAkB,UAAU,cAAgB,UAAY,CACpD,GAAI,GAAO,KAAK,aAChB,YAAK,eAAiB,EAAK,MAC3B,KAAK,gBAAkB,EAAK,OACrB,GAEJ,KAGP,GAAqC,UAAY,CAOjD,WAA6B,EAAQ,EAAU,CAC3C,GAAI,GAAc,GAAmB,GAOrC,GAAmB,KAAM,CAAE,OAAQ,EAAQ,YAAa,IAE5D,MAAO,MAGP,GAAmC,UAAY,CAW/C,WAA2B,EAAU,EAAY,EAAa,CAc1D,GAPA,KAAK,oBAAsB,GAM3B,KAAK,cAAgB,GAAI,IACrB,MAAO,IAAa,WACpB,KAAM,IAAI,WAAU,2DAExB,KAAK,UAAY,EACjB,KAAK,YAAc,EACnB,KAAK,aAAe,EAQxB,SAAkB,UAAU,QAAU,SAAU,EAAQ,CACpD,GAAI,CAAC,UAAU,OACX,KAAM,IAAI,WAAU,4CAGxB,GAAI,QAAO,UAAY,aAAe,CAAE,mBAAmB,UAG3D,IAAI,CAAE,aAAkB,IAAY,GAAQ,SACxC,KAAM,IAAI,WAAU,yCAExB,GAAI,GAAe,KAAK,cAExB,AAAI,EAAa,IAAI,IAGrB,GAAa,IAAI,EAAQ,GAAI,IAAkB,IAC/C,KAAK,YAAY,YAAY,MAE7B,KAAK,YAAY,aAQrB,EAAkB,UAAU,UAAY,SAAU,EAAQ,CACtD,GAAI,CAAC,UAAU,OACX,KAAM,IAAI,WAAU,4CAGxB,GAAI,QAAO,UAAY,aAAe,CAAE,mBAAmB,UAG3D,IAAI,CAAE,aAAkB,IAAY,GAAQ,SACxC,KAAM,IAAI,WAAU,yCAExB,GAAI,GAAe,KAAK,cAExB,AAAI,CAAC,EAAa,IAAI,IAGtB,GAAa,OAAO,GACf,EAAa,MACd,KAAK,YAAY,eAAe,SAQxC,EAAkB,UAAU,WAAa,UAAY,CACjD,KAAK,cACL,KAAK,cAAc,QACnB,KAAK,YAAY,eAAe,OAQpC,EAAkB,UAAU,aAAe,UAAY,CACnD,GAAI,GAAQ,KACZ,KAAK,cACL,KAAK,cAAc,QAAQ,SAAU,EAAa,CAC9C,AAAI,EAAY,YACZ,EAAM,oBAAoB,KAAK,MAU3C,EAAkB,UAAU,gBAAkB,UAAY,CAEtD,GAAI,EAAC,KAAK,YAGV,IAAI,GAAM,KAAK,aAEX,EAAU,KAAK,oBAAoB,IAAI,SAAU,EAAa,CAC9D,MAAO,IAAI,IAAoB,EAAY,OAAQ,EAAY,mBAEnE,KAAK,UAAU,KAAK,EAAK,EAAS
,GAClC,KAAK,gBAOT,EAAkB,UAAU,YAAc,UAAY,CAClD,KAAK,oBAAoB,OAAO,IAOpC,EAAkB,UAAU,UAAY,UAAY,CAChD,MAAO,MAAK,oBAAoB,OAAS,GAEtC,KAMP,GAAY,MAAO,UAAY,YAAc,GAAI,SAAY,GAAI,IAKjE,GAAgC,UAAY,CAO5C,WAAwB,EAAU,CAC9B,GAAI,CAAE,gBAAgB,IAClB,KAAM,IAAI,WAAU,sCAExB,GAAI,CAAC,UAAU,OACX,KAAM,IAAI,WAAU,4CAExB,GAAI,GAAa,GAAyB,cACtC,EAAW,GAAI,IAAkB,EAAU,EAAY,MAC3D,GAAU,IAAI,KAAM,GAExB,MAAO,MAGX,CACI,UACA,YACA,cACF,QAAQ,SAAU,EAAQ,CACxB,GAAe,UAAU,GAAU,UAAY,CAC3C,GAAI,GACJ,MAAQ,GAAK,GAAU,IAAI,OAAO,GAAQ,MAAM,EAAI,cAI5D,GAAI,IAAS,UAAY,CAErB,MAAI,OAAO,IAAS,gBAAmB,YAC5B,GAAS,eAEb,MAGJ,GAAQ,GCr2Bf,GAAM,IAAS,GAAI,GAYb,GAAY,EAAM,IAAM,EAC5B,GAAI,IAAe,GAAW,CAC5B,OAAW,KAAS,GAClB,GAAO,KAAK,OAGf,KACC,EAAU,GAAY,EAAM,GAAO,EAAG,IACnC,KACC,EAAS,IAAM,EAAS,gBAG5B,EAAY,IAcT,YACL,EACa,CACb,MAAO,CACL,MAAQ,EAAG,YACX,OAAQ,EAAG,cAyBR,YACL,EACyB,CACzB,MAAO,IACJ,KACC,EAAI,GAAY,EAAS,QAAQ,IACjC,EAAU,GAAY,GACnB,KACC,EAAO,CAAC,CAAE,YAAa,IAAW,GAClC,EAAS,IAAM,EAAS,UAAU,IAClC,EAAI,IAAM,GAAe,MAG7B,EAAU,GAAe,KCxGxB,YACL,EACa,CACb,MAAO,CACL,MAAQ,EAAG,YACX,OAAQ,EAAG,cCWf,GAAM,IAAS,GAAI,GAUb,GAAY,EAAM,IAAM,EAC5B,GAAI,sBAAqB,GAAW,CAClC,OAAW,KAAS,GAClB,GAAO,KAAK,IACb,CACD,UAAW,MAGZ,KACC,EAAU,GAAY,EAAM,GAAO,EAAG,IACnC,KACC,EAAS,IAAM,EAAS,gBAG5B,EAAY,IAyCT,YACL,EAAiB,EAAY,GACR,CACrB,MAAO,IAA0B,GAC9B,KACC,EAAI,CAAC,CAAE,OAAQ,CACb,GAAM,GAAU,GAAe,GACzB,EAAU,GAAsB,GACtC,MAAO,IACL,EAAQ,OAAS,EAAQ,OAAS,IAGtC,KC/EN,GAAM,IAA4C,CAChD,OAAQ,EAAW,2BACnB,OAAQ,EAAW,4BAcd,YAAmB,EAAuB,CAC/C,MAAO,IAAQ,GAAM,QAchB,YAAmB,EAAc,EAAsB,CAC5D,AAAI,GAAQ,GAAM,UAAY,GAC5B,GAAQ,GAAM,QAYX,YAAqB,EAAmC,CAC7D,GAAM,GAAK,GAAQ,GACnB,MAAO,GAAU,EAAI,UAClB,KACC,EAAI,IAAM,EAAG,SACb,EAAU,EAAG,UChCnB,YACE,EAAiB,EACR,CACT,OAAQ,EAAG,iBAGJ,kBAEH,MAAI,GAAG,OAAS,QACP,SAAS,KAAK,GAEd,OAGN,uBACA,qBACH,MAAO,WAIP,MAAO,GAAG,mBAaT,aAA+C,CACpD,MAAO,GAAyB,OAAQ,WACrC,KACC,EAAO,GAAM,CAAE,GAAG,SAAW,EAAG,UAChC,EAAI,GAAO,EACT,KAAM,GAAU,UAAY,SAAW,SACvC,KAAM,EAAG,IACT,OAAQ,CACN,EAAG,iBACH,EAAG,sBAGP,EAAO,CAAC,CAAE,OAAM,UAAW,CACzB,GAAI,IAAS,SAAU,CACrB,GAAM,GAAS,KACf,GAAI,MAAO,IAAW,YACpB,MAAO,CAAC,
GAAwB,EAAQ,GAE5C,MAAO,KAET,MClFC,aAA4B,CACjC,MAAO,IAAI,KAAI,SAAS,MAQnB,YAAqB,EAAgB,CAC1C,SAAS,KAAO,EAAI,KAUf,aAAuC,CAC5C,MAAO,IAAI,GCJb,YAAqB,EAAiB,EAA8B,CAGlE,GAAI,MAAO,IAAU,UAAY,MAAO,IAAU,SAChD,EAAG,WAAa,EAAM,mBAGb,YAAiB,MAC1B,EAAG,YAAY,WAGN,MAAM,QAAQ,GACvB,OAAW,KAAQ,GACjB,GAAY,EAAI,GA2Bf,WACL,EAAa,KAAmC,EAC7C,CACH,GAAM,GAAK,SAAS,cAAc,GAGlC,GAAI,EACF,OAAW,KAAQ,QAAO,KAAK,GAC7B,AAAI,MAAO,GAAW,IAAU,UAC9B,EAAG,aAAa,EAAM,EAAW,IAC1B,EAAW,IAClB,EAAG,aAAa,EAAM,IAG5B,OAAW,KAAS,GAClB,GAAY,EAAI,GAGlB,MAAO,GC1EF,YAAkB,EAAe,EAAmB,CACzD,GAAI,GAAI,EACR,GAAI,EAAM,OAAS,EAAG,CACpB,KAAO,EAAM,KAAO,KAAO,EAAE,EAAI,GAAG,CACpC,MAAO,GAAG,EAAM,UAAU,EAAG,QAE/B,MAAO,GAmBF,YAAe,EAAuB,CAC3C,GAAI,EAAQ,IAAK,CACf,GAAM,GAAS,CAAG,IAAQ,KAAO,IAAO,IACxC,MAAO,GAAK,IAAQ,MAAY,KAAM,QAAQ,UAE9C,OAAO,GAAM,WC1BV,aAAmC,CACxC,MAAO,UAAS,KAAK,UAAU,GAa1B,YAAyB,EAAoB,CAClD,GAAM,GAAK,EAAE,IAAK,CAAE,KAAM,IAC1B,EAAG,iBAAiB,QAAS,GAAM,EAAG,mBACtC,EAAG,QAUE,aAAiD,CACtD,MAAO,GAA2B,OAAQ,cACvC,KACC,EAAI,IACJ,EAAU,MACV,EAAO,GAAQ,EAAK,OAAS,GAC7B,EAAY,IASX,aAAwD,CAC7D,MAAO,MACJ,KACC,EAAI,GAAM,GAAmB,QAAQ,QACrC,EAAO,GAAM,MAAO,IAAO,cCxC1B,YAAoB,EAAoC,CAC7D,GAAM,GAAQ,WAAW,GACzB,MAAO,IAA0B,GAC/B,EAAM,YAAY,IAAM,EAAK,EAAM,WAElC,KACC,EAAU,EAAM,UASf,aAA2C,CAChD,GAAM,GAAQ,WAAW,SACzB,MAAO,GACL,EAAU,OAAQ,eAAe,KAAK,GAAM,KAC5C,EAAU,OAAQ,cAAc,KAAK,GAAM,MAE1C,KACC,EAAU,EAAM,UAgBf,YACL,EAA6B,EACd,CACf,MAAO,GACJ,KACC,EAAU,GAAU,EAAS,IAAY,IC5CxC,YACL,EAAmB,EAAuB,CAAE,YAAa,eACnC,CACtB,MAAO,IAAK,MAAM,GAAG,IAAO,IACzB,KACC,EAAO,GAAO,EAAI,SAAW,KAC7B,GAAW,IAAM,IAchB,YACL,EAAmB,EACJ,CACf,MAAO,IAAQ,EAAK,GACjB,KACC,EAAU,GAAO,EAAI,QACrB,EAAY,IAYX,YACL,EAAmB,EACG,CACtB,GAAM,GAAM,GAAI,WAChB,MAAO,IAAQ,EAAK,GACjB,KACC,EAAU,GAAO,EAAI,QACrB,EAAI,GAAO,EAAI,gBAAgB,EAAK,aACpC,EAAY,ICxCX,aAA6C,CAClD,MAAO,CACL,EAAG,KAAK,IAAI,EAAG,SACf,EAAG,KAAK,IAAI,EAAG,UAWZ,aAA2D,CAChE,MAAO,GACL,EAAU,OAAQ,SAAU,CAAE,QAAS,KACvC,EAAU,OAAQ,SAAU,CAAE,QAAS,MAEtC,KACC,EAAI,IACJ,EAAU,OCzBT,aAAyC,CAC9C,MAAO,CACL,MAAQ,WACR,OAAQ,aAWL,aAAuD,CAC5D,MAAO,GAAU,OAAQ,SAAU,CAAE,QAAS,KAC3C,KACC,EAAI,I
ACJ,EAAU,OCTT,aAA+C,CACpD,MAAO,GAAc,CACnB,KACA,OAEC,KACC,EAAI,CAAC,CAAC,EAAQ,KAAW,EAAE,SAAQ,UACnC,EAAY,ICRX,YACL,EAAiB,CAAE,YAAW,WACR,CACtB,GAAM,GAAQ,EACX,KACC,EAAwB,SAItB,EAAU,EAAc,CAAC,EAAO,IACnC,KACC,EAAI,IAAM,GAAiB,KAI/B,MAAO,GAAc,CAAC,EAAS,EAAW,IACvC,KACC,EAAI,CAAC,CAAC,CAAE,UAAU,CAAE,SAAQ,QAAQ,CAAE,IAAG,QAAU,EACjD,OAAQ,CACN,EAAG,EAAO,EAAI,EACd,EAAG,EAAO,EAAI,EAAI,GAEpB,WCOD,YACL,EAAgB,CAAE,OACH,CAGf,GAAM,GAAM,EAAwB,EAAQ,WACzC,KACC,EAAI,CAAC,CAAE,UAAW,IAItB,MAAO,GACJ,KACC,GAAS,IAAM,EAAK,CAAE,QAAS,GAAM,SAAU,KAC/C,EAAI,GAAW,EAAO,YAAY,IAClC,GAAY,GACZ,MCFN,GAAM,IAAS,EAAW,aACpB,GAAiB,KAAK,MAAM,GAAO,aACzC,GAAO,KAAO,GAAG,GAAI,KAAI,GAAO,KAAM,QAW/B,aAAiC,CACtC,MAAO,IAUF,YAAiB,EAAqB,CAC3C,MAAO,IAAO,SAAS,SAAS,GAW3B,WACL,EAAkB,EACV,CACR,MAAO,OAAO,IAAU,YACpB,GAAO,aAAa,GAAK,QAAQ,IAAK,EAAM,YAC5C,GAAO,aAAa,GC7BnB,YACL,EAAS,EAAmB,SACP,CACrB,MAAO,GAAW,sBAAsB,KAAS,GAa5C,YACL,EAAS,EAAmB,SACL,CACvB,MAAO,GAAY,sBAAsB,KAAS,GC9GpD,OAAwB,SCajB,YAA0B,EAAyB,CACxD,MACE,GAAC,QAAD,CAAO,MAAM,gBAAgB,SAAU,GACrC,EAAC,MAAD,CAAK,MAAM,mCACT,EAAC,MAAD,CAAK,MAAM,kCAEb,EAAC,OAAD,CAAM,MAAM,wBACV,EAAC,OAAD,CAAM,wBAAuB,MCN9B,YAA+B,EAAyB,CAC7D,MACE,GAAC,SAAD,CACE,MAAM,uBACN,MAAO,EAAY,kBACnB,wBAAuB,IAAI,aCejC,YACE,EAA2C,EAC9B,CACb,GAAM,GAAS,EAAO,EAChB,EAAS,EAAO,EAGhB,EAAU,OAAO,KAAK,EAAS,OAClC,OAAO,GAAO,CAAC,EAAS,MAAM,IAC9B,OAAyB,CAAC,EAAM,IAAQ,CACvC,GAAG,EAAM,EAAC,MAAD,KAAM,GAAY,KAC1B,IACF,MAAM,EAAG,IAGN,EAAM,GAAI,KAAI,EAAS,UAC7B,MAAI,IAAQ,qBACV,EAAI,aAAa,IAAI,IAAK,OAAO,QAAQ,EAAS,OAC/C,OAAO,CAAC,CAAC,CAAE,KAAW,GACtB,OAAO,CAAC,EAAW,CAAC,KAAW,GAAG,KAAa,IAAQ,OAAQ,KAKlE,EAAC,IAAD,CAAG,KAAM,GAAG,IAAO,MAAM,yBAAyB,SAAU,IAC1D,EAAC,UAAD,CACE,MAAO,CAAC,4BAA6B,GAAG,EACpC,CAAC,uCACD,IACF,KAAK,KACP,gBAAe,EAAS,MAAM,QAAQ,IAErC,EAAS,GAAK,EAAC,MAAD,CAAK,MAAM,mCAC1B,EAAC,KAAD,CAAI,MAAM,2BAA2B,EAAS,OAC7C,EAAS,GAAK,EAAS,KAAK,OAAS,GACpC,EAAC,IAAD,CAAG,MAAM,4BACN,GAAS,EAAS,KAAM,MAG5B,EAAS,GAAK,EAAQ,OAAS,GAC9B,EAAC,IAAD,CAAG,MAAM,2BACN,EAAY,8BAA8B,KAAM,KAmBtD,YACL,EACa,CACb,GAAM,GAAY,EAAO,GAAG,MACtB,EAAO,CAAC,GAAG,GAGX,EAAS,EAAK
,UAAU,GAAO,CAAC,EAAI,SAAS,SAAS,MACtD,CAAC,GAAW,EAAK,OAAO,EAAQ,GAGlC,EAAQ,EAAK,UAAU,GAAO,EAAI,MAAQ,GAC9C,AAAI,IAAU,IACZ,GAAQ,EAAK,QAGf,GAAM,GAAO,EAAK,MAAM,EAAG,GACrB,EAAO,EAAK,MAAM,GAGlB,EAAW,CACf,GAAqB,EAAS,EAAc,CAAE,EAAC,GAAU,IAAU,IACnE,GAAG,EAAK,IAAI,GAAW,GAAqB,EAAS,IACrD,GAAG,EAAK,OAAS,CACf,EAAC,UAAD,CAAS,MAAM,0BACb,EAAC,UAAD,CAAS,SAAU,IAChB,EAAK,OAAS,GAAK,EAAK,SAAW,EAChC,EAAY,0BACZ,EAAY,2BAA4B,EAAK,SAG/C,EAAK,IAAI,GAAW,GAAqB,EAAS,MAEtD,IAIN,MACE,GAAC,KAAD,CAAI,MAAM,0BACP,GCvHA,YAA2B,EAAiC,CACjE,MACE,GAAC,KAAD,CAAI,MAAM,oBACP,OAAO,QAAQ,GAAO,IAAI,CAAC,CAAC,EAAK,KAChC,EAAC,KAAD,CAAI,MAAO,oCAAoC,KAC5C,MAAO,IAAU,SAAW,GAAM,GAAS,KCN/C,YAAqB,EAAiC,CAC3D,MACE,GAAC,MAAD,CAAK,MAAM,0BACT,EAAC,MAAD,CAAK,MAAM,qBACR,ICUT,YAAuB,EAA+B,CACpD,GAAM,GAAS,KAGT,EAAM,GAAI,KAAI,MAAM,EAAQ,WAAY,EAAO,MACrD,MACE,GAAC,KAAD,CAAI,MAAM,oBACR,EAAC,IAAD,CAAG,KAAM,EAAI,WAAY,MAAM,oBAC5B,EAAQ,QAkBV,YACL,EAAqB,EACR,CACb,MACE,GAAC,MAAD,CAAK,MAAM,cACT,EAAC,SAAD,CACE,MAAM,sBACN,aAAY,EAAY,yBAEvB,EAAO,OAEV,EAAC,KAAD,CAAI,MAAM,oBACP,EAAS,IAAI,MCdf,YACL,EAAiB,EACO,CACxB,GAAM,GAAU,EAAM,IAAM,EAAc,CACxC,GAAmB,GACnB,GAA0B,MAEzB,KACC,EAAI,CAAC,CAAC,CAAE,IAAG,KAAK,KAAY,CAC1B,GAAM,CAAE,SAAU,GAAe,GACjC,MAAQ,CACN,EAAG,EAAI,EAAO,EAAI,EAAQ,EAC1B,EAAG,EAAI,EAAO,MAMtB,MAAO,IAAkB,GACtB,KACC,EAAU,GAAU,EACjB,KACC,EAAI,GAAW,EAAE,SAAQ,YACzB,GAAK,CAAC,CAAC,GAAU,QAcpB,YACL,EAAiB,EACkB,CACnC,MAAO,GAAM,IAAM,CACjB,GAAM,GAAQ,GAAI,GAClB,EAAM,UAAU,CAGd,KAAK,CAAE,UAAU,CACf,EAAG,MAAM,YAAY,iBAAkB,GAAG,EAAO,OACjD,EAAG,MAAM,YAAY,iBAAkB,GAAG,EAAO,QAInD,UAAW,CACT,EAAG,MAAM,eAAe,kBACxB,EAAG,MAAM,eAAe,qBAK5B,EACG,KACC,GAAa,IAAK,IAClB,EAAI,IAAM,EAAU,yBACpB,EAAI,CAAC,CAAE,OAAQ,IAEd,UAAU,CAGT,KAAK,EAAQ,CACX,AAAI,EACF,EAAG,MAAM,YAAY,iBAAkB,GAAG,CAAC,OAE3C,EAAG,MAAM,eAAe,mBAI5B,UAAW,CACT,EAAG,MAAM,eAAe,qBAKhC,GAAM,GAAQ,EAAW,uBAAwB,GAC3C,EAAQ,EAAU,EAAO,YAAa,CAAE,KAAM,KACpD,SACG,KACC,EAAU,CAAC,CAAE,YAAa,EAAS,EAAQ,GAC3C,EAAI,GAAM,EAAG,mBAEZ,UAAU,IAAM,EAAG,QAGjB,GAAgB,EAAI,GACxB,KACC,EAAI,GAAS,EAAM,KAAK,IACxB,EAAS,IAAM,EAAM,YACrB,EAAI,GAAU,GAAE,IAAK,
GAAO,OCnGpC,YAA+B,EAAgC,CAC7D,GAAM,GAAkB,GACxB,OAAW,KAAW,GAAY,eAAgB,GAAY,CAC5D,GAAI,GACA,EAAO,EAAQ,WAGnB,KAAQ,EAAQ,YAAY,KAAK,EAAK,cAAgB,CACpD,GAAM,GAAS,EAAK,UAAU,EAAM,OACpC,EAAO,EAAO,UAAU,EAAM,GAAG,QACjC,EAAQ,KAAK,IAGjB,MAAO,GAST,YAAc,EAAqB,EAA2B,CAC5D,EAAO,OAAO,GAAG,MAAM,KAAK,EAAO,aAqB9B,YACL,EAAiB,EAAwB,CAAE,UACR,CAGnC,GAAM,GAAc,GAAI,KACxB,OAAW,KAAU,IAAsB,GAAY,CACrD,GAAM,CAAC,CAAE,GAAM,EAAO,YAAa,MAAM,aACzC,AAAI,GAAmB,gBAAgB,KAAO,IAC5C,GAAY,IAAI,CAAC,EAAI,GAAiB,CAAC,IACvC,EAAO,YAAY,EAAY,IAAI,CAAC,KAKxC,MAAI,GAAY,OAAS,EAChB,EAGF,EAAM,IAAM,CACjB,GAAM,GAAQ,GAAI,GAGlB,SACG,KACC,GAAU,EAAM,KAAK,GAAS,MAE7B,UAAU,GAAU,CACnB,EAAG,OAAS,CAAC,EAGb,OAAW,CAAC,EAAI,IAAe,GAAa,CAC1C,GAAM,GAAQ,EAAW,cAAe,GAClC,EAAQ,EAAW,gBAAgB,KAAO,GAChD,AAAK,EAGH,GAAK,EAAO,GAFZ,GAAK,EAAO,MAOf,EAAM,GAAG,CAAC,GAAG,GACjB,IAAI,CAAC,CAAC,CAAE,KACP,GAAgB,EAAY,KAG7B,KACC,EAAS,IAAM,EAAM,YACrB,QRjFR,GAAI,IAAW,EAaf,YAA2B,EAA0C,CACnE,GAAI,EAAG,mBAAoB,CACzB,GAAM,GAAU,EAAG,mBACnB,GAAI,EAAQ,UAAY,KACtB,MAAO,GAGJ,GAAI,EAAQ,UAAY,KAAO,CAAC,EAAQ,SAAS,OACpD,MAAO,IAAkB,IAqBxB,YACL,EACuB,CACvB,MAAO,IAAiB,GACrB,KACC,EAAI,CAAC,CAAE,WAEE,EACL,WAAY,AAFE,GAAsB,GAEhB,MAAQ,KAGhC,EAAwB,eAiBvB,YACL,EAAiB,EAC8B,CAC/C,GAAM,CAAE,QAAS,GAAU,WAAW,WACtC,MAAO,GAAM,IAAM,CACjB,GAAM,GAAQ,GAAI,GASlB,GARA,EAAM,UAAU,CAAC,CAAE,gBAAiB,CAClC,AAAI,GAAc,EAChB,EAAG,aAAa,WAAY,KAE5B,EAAG,gBAAgB,cAInB,WAAY,cAAe,CAC7B,GAAM,GAAS,EAAG,QAAQ,OAC1B,EAAO,GAAK,UAAU,EAAE,KACxB,EAAO,aACL,GAAsB,EAAO,IAC7B,GAKJ,GAAM,GAAY,EAAG,QAAQ,CAC3B,mCACA,mBACA,KAAK,OACP,GAAI,YAAqB,aAAa,CACpC,GAAM,GAAO,GAAkB,GAG/B,GAAI,MAAO,IAAS,aAClB,GAAU,UAAU,SAAS,aAC7B,GAAQ,0BACP,CACD,GAAM,GAAe,GAAoB,EAAM,EAAI,GAGnD,MAAO,IAAe,GACnB,KACC,EAAI,GAAS,EAAM,KAAK,IACxB,EAAS,IAAM,EAAM,YACrB,EAAI,GAAU,GAAE,IAAK,GAAO,IAC5B,GAAU,GAAiB,GACxB,KACC,GAAU,EAAM,KAAK,GAAS,KAC9B,EAAI,CAAC,CAAE,QAAO,YAAa,GAAS,GACpC,IACA,EAAU,GAAU,EAAS,EAAe,OAQxD,MAAO,IAAe,GACnB,KACC,EAAI,GAAS,EAAM,KAAK,IACxB,EAAS,IAAM,EAAM,YACrB,EAAI,GAAU,GAAE,IAAK,GAAO,OSpI7B,YACL,EAAwB,CAAE,UAAS,UACd,CACrB,GAAI,GAAO,GACX,MAAO,GAGL,EACG,KACC,E
AAI,GAAU,EAAO,QAAQ,wBAC7B,EAAO,GAAW,IAAO,GACzB,GAAe,CAAE,OAAQ,OAAQ,OAAQ,MAI7C,EACG,KACC,EAAO,GAAU,GAAU,CAAC,GAC5B,EAAI,IAAM,EAAO,EAAG,MACpB,EAAI,GAAW,EACb,OAAQ,EAAS,OAAS,aAiB7B,YACL,EAAwB,EACQ,CAChC,MAAO,GAAM,IAAM,CACjB,GAAM,GAAQ,GAAI,GAClB,SAAM,UAAU,CAAC,CAAE,SAAQ,YAAa,CACtC,AAAI,IAAW,OACb,EAAG,aAAa,OAAQ,IAExB,EAAG,gBAAgB,QACjB,GACF,EAAG,mBAIA,GAAa,EAAI,GACrB,KACC,EAAI,GAAS,EAAM,KAAK,IACxB,EAAS,IAAM,EAAM,YACrB,EAAI,GAAU,GAAE,IAAK,GAAO,OC3FpC,GAAM,IAAW,EAAE,SAgBZ,YACL,EACkC,CAClC,SAAG,YAAY,IACf,GAAS,YAAY,GAAY,IAG1B,EAAG,CAAE,IAAK,ICIZ,YACL,EACyB,CACzB,GAAM,GAAS,EAAY,iBAAkB,GAC7C,MAAO,GAAM,GAAG,EAAO,IAAI,GAAS,EAAU,EAAO,UAClD,KACC,GAAmB,CACjB,OAAQ,EAAW,aAAa,EAAM,YAIzC,KACC,EAAU,CACR,OAAQ,EAAW,aAAa,EAAO,GAAG,UAiB3C,YACL,EACoC,CACpC,GAAM,GAAY,EAAW,iBAAkB,GAC/C,MAAO,GAAM,IAAM,CACjB,GAAM,GAAQ,GAAI,GAClB,SAAc,CAAC,EAAO,GAAiB,KACpC,KACC,GAAU,EAAG,IACb,GAAU,EAAM,KAAK,GAAS,MAE7B,UAAU,CAGT,KAAK,CAAC,CAAE,WAAW,CACjB,GAAM,GAAS,GAAiB,GAC1B,CAAE,SAAU,GAAe,GAGjC,EAAG,MAAM,YAAY,mBAAoB,GAAG,EAAO,OACnD,EAAG,MAAM,YAAY,uBAAwB,GAAG,OAGhD,EAAU,SAAS,CACjB,SAAU,SACV,KAAM,EAAO,KAKjB,UAAW,CACT,EAAG,MAAM,eAAe,oBACxB,EAAG,MAAM,eAAe,2BAKzB,GAAiB,GACrB,KACC,EAAI,GAAS,EAAM,KAAK,IACxB,EAAS,IAAM,EAAM,YACrB,EAAI,GAAU,GAAE,IAAK,GAAO,OCrE7B,YACL,EAAiB,CAAE,UAAS,UACI,CAChC,MAAO,GAGL,GAAG,EAAY,aAAc,GAC1B,IAAI,GAAS,GAAe,EAAO,CAAE,YAGxC,GAAG,EAAY,qBAAsB,GAClC,IAAI,GAAS,GAAe,IAG/B,GAAG,EAAY,UAAW,GACvB,IAAI,GAAS,GAAa,EAAO,CAAE,UAAS,YAG/C,GAAG,EAAY,cAAe,GAC3B,IAAI,GAAS,GAAiB,KCZ9B,YACL,EAAkB,CAAE,UACA,CACpB,MAAO,GACJ,KACC,EAAU,GAAW,EACnB,EAAG,IACH,EAAG,IAAO,KAAK,GAAM,OAEpB,KACC,EAAI,GAAW,EAAE,UAAS,eAiB7B,YACL,EAAiB,EACc,CAC/B,GAAM,GAAQ,EAAW,cAAe,GACxC,MAAO,GAAM,IAAM,CACjB,GAAM,GAAQ,GAAI,GAClB,SAAM,UAAU,CAAC,CAAE,UAAS,YAAa,CACvC,EAAM,YAAc,EACpB,AAAI,EACF,EAAG,aAAa,gBAAiB,QAEjC,EAAG,gBAAgB,mBAIhB,GAAY,EAAI,GACpB,KACC,EAAI,GAAS,EAAM,KAAK,IACxB,EAAS,IAAM,EAAM,YACrB,EAAI,GAAU,GAAE,IAAK,GAAO,OC7BpC,YAAkB,CAAE,aAAgD,CAClE,GAAI,CAAC,GAAQ,mBACX,MAAO,GAAG,IAGZ,GAAM,GAAa,EAChB,KACC,EAAI,CAAC,CAAE,OAAQ,CAAE,QAAU,GAC3B,GA
AY,EAAG,GACf,EAAI,CAAC,CAAC,EAAG,KAAO,CAAC,EAAI,EAAG,IACxB,EAAwB,IAItB,EAAU,EAAc,CAAC,EAAW,IACvC,KACC,EAAO,CAAC,CAAC,CAAE,UAAU,CAAC,CAAE,MAAQ,KAAK,IAAI,EAAI,EAAO,GAAK,KACzD,EAAI,CAAC,CAAC,CAAE,CAAC,MAAgB,GACzB,KAIE,EAAU,GAAY,UAC5B,MAAO,GAAc,CAAC,EAAW,IAC9B,KACC,EAAI,CAAC,CAAC,CAAE,UAAU,KAAY,EAAO,EAAI,KAAO,CAAC,GACjD,IACA,EAAU,GAAU,EAAS,EAAU,EAAG,KAC1C,EAAU,KAgBT,YACL,EAAiB,EACG,CACpB,MAAO,GAAM,IAAM,CACjB,GAAM,GAAS,iBAAiB,GAChC,MAAO,GACL,EAAO,WAAa,UACpB,EAAO,WAAa,oBAGrB,KACC,GAAkB,GAAiB,GAAK,GAAS,IACjD,EAAI,CAAC,CAAC,EAAQ,CAAE,UAAU,KAAa,EACrC,OAAQ,EAAS,EAAS,EAC1B,SACA,YAEF,EAAqB,CAAC,EAAG,IACvB,EAAE,SAAW,EAAE,QACf,EAAE,SAAW,EAAE,QACf,EAAE,SAAW,EAAE,QAEjB,EAAY,IAeX,YACL,EAAiB,CAAE,UAAS,SACG,CAC/B,MAAO,GAAM,IAAM,CACjB,GAAM,GAAQ,GAAI,GAClB,SACG,KACC,EAAwB,UACxB,GAAkB,IAEjB,UAAU,CAAC,CAAC,CAAE,UAAU,CAAE,aAAc,CACvC,AAAI,EACF,EAAG,aAAa,gBAAiB,EAAS,SAAW,UAErD,EAAG,gBAAgB,mBAI3B,EAAM,UAAU,GAGT,EACJ,KACC,GAAU,EAAM,KAAK,GAAS,KAC9B,EAAI,GAAU,GAAE,IAAK,GAAO,OCrH7B,YACL,EAAiB,CAAE,YAAW,WACL,CACzB,MAAO,IAAgB,EAAI,CAAE,YAAW,YACrC,KACC,EAAI,CAAC,CAAE,OAAQ,CAAE,QAAU,CACzB,GAAM,CAAE,UAAW,GAAe,GAClC,MAAO,CACL,OAAQ,GAAK,KAGjB,EAAwB,WAevB,YACL,EAAiB,EACmB,CACpC,MAAO,GAAM,IAAM,CACjB,GAAM,GAAQ,GAAI,GAClB,EAAM,UAAU,CAAC,CAAE,YAAa,CAC9B,AAAI,EACF,EAAG,aAAa,gBAAiB,UAEjC,EAAG,gBAAgB,mBAIvB,GAAM,GAAU,GAAmB,cACnC,MAAI,OAAO,IAAY,YACd,EAGF,GAAiB,EAAS,GAC9B,KACC,EAAI,GAAS,EAAM,KAAK,IACxB,EAAS,IAAM,EAAM,YACrB,EAAI,GAAU,GAAE,IAAK,GAAO,OCvD7B,YACL,EAAiB,CAAE,YAAW,WACZ,CAGlB,GAAM,GAAU,EACb,KACC,EAAI,CAAC,CAAE,YAAa,GACpB,KAIE,EAAU,EACb,KACC,EAAU,IAAM,GAAiB,GAC9B,KACC,EAAI,CAAC,CAAE,YAAc,EACnB,IAAQ,EAAG,UACX,OAAQ,EAAG,UAAY,KAEzB,EAAwB,aAMhC,MAAO,GAAc,CAAC,EAAS,EAAS,IACrC,KACC,EAAI,CAAC,CAAC,EAAQ,CAAE,MAAK,UAAU,CAAE,OAAQ,CAAE,KAAK,KAAM,CAAE,cACtD,GAAS,KAAK,IAAI,EAAG,EACjB,KAAK,IAAI,EAAG,EAAS,EAAI,GACzB,KAAK,IAAI,EAAG,EAAS,EAAI,IAEtB,CACL,OAAQ,EAAM,EACd,SACA,OAAQ,EAAM,GAAU,KAG5B,EAAqB,CAAC,EAAG,IACvB,EAAE,SAAW,EAAE,QACf,EAAE,SAAW,EAAE,QACf,EAAE,SAAW,EAAE,SChDhB,YACL,EACqB,CACrB,GAAM,GAAU,SAAkB,cAAgB,CAChD,
MAAO,EAAO,UAAU,GAAS,WAC/B,EAAM,aAAa,wBACnB,UAIJ,MAAO,GAAG,GAAG,GACV,KACC,GAAS,GAAS,EAAU,EAAO,UAChC,KACC,GAAM,KAGV,EAAU,EAAO,KAAK,IAAI,EAAG,EAAQ,SACrC,EAAI,GAAU,EACZ,MAAO,EAAO,QAAQ,GACtB,MAAO,CACL,OAAS,EAAM,aAAa,wBAC5B,QAAS,EAAM,aAAa,yBAC5B,OAAS,EAAM,aAAa,4BAGhC,EAAY,IAWX,YACL,EACgC,CAChC,MAAO,GAAM,IAAM,CACjB,GAAM,GAAQ,GAAI,GAClB,EAAM,UAAU,GAAW,CAGzB,OAAW,CAAC,EAAK,IAAU,QAAO,QAAQ,EAAQ,OAChD,SAAS,KAAK,aAAa,iBAAiB,IAAO,GAGrD,OAAS,GAAQ,EAAG,EAAQ,EAAO,OAAQ,IAAS,CAClD,GAAM,GAAQ,EAAO,GAAO,mBAC5B,AAAI,YAAiB,cACnB,GAAM,OAAS,EAAQ,QAAU,GAIrC,SAAS,YAAa,KAIxB,GAAM,GAAS,EAA8B,QAAS,GACtD,MAAO,IAAa,GACjB,KACC,EAAI,GAAS,EAAM,KAAK,IACxB,EAAS,IAAM,EAAM,YACrB,EAAI,GAAU,GAAE,IAAK,GAAO,OCpHpC,OAAwB,SAiCxB,YAAiB,EAAyB,CACxC,EAAG,aAAa,kBAAmB,IACnC,GAAM,GAAO,EAAG,UAChB,SAAG,gBAAgB,mBACZ,EAYF,YACL,CAAE,UACI,CACN,AAAI,WAAY,eACd,GAAI,GAA8B,GAAc,CAC9C,GAAI,YAAY,iDAAkD,CAChE,KAAM,GACJ,EAAG,aAAa,wBAChB,GAAQ,EACN,EAAG,aAAa,6BAInB,GAAG,UAAW,GAAM,EAAW,KAAK,MAEtC,KACC,EAAI,GAAM,CAER,AADgB,EAAG,QACX,UAEV,GAAM,EAAY,sBAEjB,UAAU,GCKnB,YAAoB,EAA0B,CAC5C,GAAI,EAAK,OAAS,EAChB,MAAO,GAGT,GAAM,CAAC,EAAM,GAAQ,EAClB,KAAK,CAAC,EAAG,IAAM,EAAE,OAAS,EAAE,QAC5B,IAAI,GAAO,EAAI,QAAQ,SAAU,KAGhC,EAAQ,EACZ,GAAI,IAAS,EACX,EAAQ,EAAK,WAEb,MAAO,EAAK,WAAW,KAAW,EAAK,WAAW,IAChD,IAGJ,GAAM,GAAS,KACf,MAAO,GAAK,IAAI,GACd,EAAI,QAAQ,EAAK,MAAM,EAAG,GAAQ,EAAO,OA6BtC,YACL,CAAE,YAAW,YAAW,aAClB,CACN,GAAM,GAAS,KACf,GAAI,SAAS,WAAa,QACxB,OAGF,AAAI,qBAAuB,UACzB,SAAQ,kBAAoB,SAG5B,EAAU,OAAQ,gBACf,UAAU,IAAM,CACf,QAAQ,kBAAoB,UAKlC,GAAM,GAAU,GAAoC,kBACpD,AAAI,MAAO,IAAY,aACrB,GAAQ,KAAO,EAAQ,MAGzB,GAAM,GAAQ,GAAW,GAAI,KAAI,cAAe,EAAO,OACpD,KACC,EAAI,GAAW,GAAW,EAAY,MAAO,GAC1C,IAAI,GAAQ,EAAK,eAEpB,EAAU,GAAQ,EAAsB,SAAS,KAAM,SACpD,KACC,EAAO,GAAM,CAAC,EAAG,SAAW,CAAC,EAAG,SAChC,EAAU,GAAM,CAGd,GAAI,EAAG,iBAAkB,SAAS,CAChC,GAAM,GAAK,EAAG,OAAO,QAAQ,KAC7B,GAAI,GAAM,CAAC,EAAG,OAAQ,CACpB,GAAM,GAAM,GAAI,KAAI,EAAG,MAOvB,GAJA,EAAI,OAAS,GACb,EAAI,KAAO,GAIT,EAAI,WAAa,SAAS,UAC1B,EAAK,SAAS,EAAI,YAElB,SAAG,iBACI,EAAG,CACR,IAAK,GAAI,KAAI,EAAG,SAKxB,MAAO,QAIb,MAIE,EAAO,EAAyB
,OAAQ,YAC3C,KACC,EAAO,GAAM,EAAG,QAAU,MAC1B,EAAI,GAAO,EACT,IAAK,GAAI,KAAI,SAAS,MACtB,OAAQ,EAAG,SAEb,MAIJ,EAAM,EAAO,GACV,KACC,EAAqB,CAAC,EAAG,IAAM,EAAE,IAAI,OAAS,EAAE,IAAI,MACpD,EAAI,CAAC,CAAE,SAAU,IAEhB,UAAU,GAGf,GAAM,GAAY,EACf,KACC,EAAwB,YACxB,EAAU,GAAO,GAAQ,EAAI,MAC1B,KACC,GAAW,IACT,IAAY,GACL,OAIb,MAIJ,EACG,KACC,GAAO,IAEN,UAAU,CAAC,CAAE,SAAU,CACtB,QAAQ,UAAU,GAAI,GAAI,GAAG,OAInC,GAAM,GAAM,GAAI,WAChB,EACG,KACC,EAAU,GAAO,EAAI,QACrB,EAAI,GAAO,EAAI,gBAAgB,EAAK,eAEnC,UAAU,GAGf,EACG,KACC,GAAK,IAEJ,UAAU,GAAe,CACxB,OAAW,KAAY,CAGrB,QACA,sBACA,oBACA,yBAGA,+BACA,gCACA,mCACA,2BACA,2BACA,GAAG,GAAQ,0BACP,CAAC,4BACD,IACH,CACD,GAAM,GAAS,GAAmB,GAC5B,EAAS,GAAmB,EAAU,GAC5C,AACE,MAAO,IAAW,aAClB,MAAO,IAAW,aAElB,EAAO,YAAY,MAM7B,EACG,KACC,GAAK,GACL,EAAI,IAAM,GAAoB,cAC9B,EAAU,GAAM,EAAG,GAAG,EAAY,SAAU,KAC5C,GAAU,GAAM,CACd,GAAM,GAAS,EAAE,UACjB,GAAI,EAAG,IAAK,CACV,OAAW,KAAQ,GAAG,oBACpB,EAAO,aAAa,EAAM,EAAG,aAAa,IAC5C,SAAG,YAAY,GAGR,GAAI,GAAW,GAAY,CAChC,EAAO,OAAS,IAAM,EAAS,iBAKjC,UAAO,YAAc,EAAG,YACxB,EAAG,YAAY,GACR,KAIV,YAGL,EAAM,EAAO,GACV,KACC,GAAO,IAEN,UAAU,CAAC,CAAE,MAAK,YAAa,CAC9B,AAAI,EAAI,MAAQ,CAAC,EACf,GAAgB,EAAI,MAEpB,OAAO,SAAS,EAAG,kBAAQ,IAAK,KAKxC,EACG,KACC,GAAU,GACV,GAAa,KACb,EAAwB,WAEvB,UAAU,CAAC,CAAE,YAAa,CACzB,QAAQ,aAAa,EAAQ,MAInC,EAAM,EAAO,GACV,KACC,GAAY,EAAG,GACf,EAAO,CAAC,CAAC,EAAG,KAAO,EAAE,IAAI,WAAa,EAAE,IAAI,UAC5C,EAAI,CAAC,CAAC,CAAE,KAAW,IAElB,UAAU,CAAC,CAAE,YAAa,CACzB,OAAO,SAAS,EAAG,kBAAQ,IAAK,KC/UxC,OAAuB,SCAvB,OAAuB,SAsChB,YACL,EAA2B,EACD,CAC1B,GAAM,GAAY,GAAI,QAAO,EAAO,UAAW,OACzC,EAAY,CAAC,EAAY,EAAc,IACpC,GAAG,4BAA+B,WAI3C,MAAO,AAAC,IAAkB,CACxB,EAAQ,EACL,QAAQ,gBAAiB,KACzB,OAGH,GAAM,GAAQ,GAAI,QAAO,MAAM,EAAO,cACpC,EACG,QAAQ,uBAAwB,QAChC,QAAQ,EAAW,QACnB,OAGL,MAAO,IACL,GACI,eAAW,GACX,GAED,QAAQ,EAAO,GACf,QAAQ,8BAA+B,OC5BzC,YAA0B,EAAuB,CACtD,MAAO,GACJ,MAAM,cACJ,IAAI,CAAC,EAAO,IAAU,EAAQ,EAC3B,EAAM,QAAQ,+BAAgC,MAC9C,GAEH,KAAK,IACP,QAAQ,kCAAmC,IAC3C,OCqCE,YACL,EAC+B,CAC/B,MAAO,GAAQ,OAAS,EAUnB,YACL,EAC+B,CAC/B,MAAO,GAAQ,OAAS,EAUnB,YACL,EACgC,CAChC,MAAO,GAAQ,OAAS,ECtE1B,YAA0B,CAAE,
SAAQ,QAAkC,CAGpE,AAAI,EAAO,KAAK,SAAW,GAAK,EAAO,KAAK,KAAO,MACjD,GAAO,KAAO,CACZ,EAAY,wBAIZ,EAAO,YAAc,aACvB,GAAO,UAAY,EAAY,4BAQjC,GAAM,GAAyB,CAC7B,SANe,EAAY,0BAC1B,MAAM,WACN,OAAO,SAKR,YAAa,GAAQ,mBAIvB,MAAO,CAAE,SAAQ,OAAM,WAmBlB,YACL,EAAa,EACC,CACd,GAAM,GAAS,KACT,EAAS,GAAI,QAAO,GAGpB,EAAM,GAAI,GACV,EAAM,GAAY,EAAQ,CAAE,QAC/B,KACC,EAAI,GAAW,CACb,GAAI,GAAsB,GACxB,OAAW,KAAU,GAAQ,KAAK,MAChC,OAAW,KAAY,GACrB,EAAS,SAAW,GAAG,GAAI,KAAI,EAAS,SAAU,EAAO,QAE/D,MAAO,KAET,MAIJ,UAAK,GACF,KACC,EAAI,GAAS,EACX,KAAM,EACN,KAAM,GAAiB,OAGxB,UAAU,EAAI,KAAK,KAAK,IAGtB,CAAE,MAAK,OClGT,aAAsC,CAC3C,GAAM,GAAS,KACT,EAAY,GAChB,GAAI,KAAI,mBAAoB,EAAO,OAI/B,EAAW,EACd,KACC,EAAI,GAAY,CACd,GAAM,CAAC,CAAE,GAAW,EAAO,KAAK,MAAM,eACtC,MAAO,GAAS,KAAK,CAAC,CAAE,UAAS,aAC/B,IAAY,GAAW,EAAQ,SAAS,KACpC,EAAS,MAKrB,EAAc,CAAC,EAAW,IACvB,UAAU,CAAC,CAAC,EAAU,KAAa,CA7DxC,MAkEM,GAHA,AADc,EAAW,qBACnB,YAAY,GAAsB,EAAU,IAG9C,SAAS,aAAc,kBAAoB,KAAM,CACnD,GAAM,GAAS,MAAO,UAAP,cAAgB,UAAW,SACpC,EAAW,CAAC,EAAQ,QAAQ,SAAS,GAI3C,GADA,SAAS,aAAc,EAAU,gBAC7B,EACF,OAAW,KAAW,IAAqB,YACzC,EAAQ,OAAS,MCWtB,YACL,EAAsB,CAAE,OACC,CACzB,GAAM,GAAK,gCAAU,YAAa,GAG5B,CAAE,gBAAiB,KACzB,AAAI,EAAa,IAAI,MACnB,GAAU,SAAU,IAGtB,GAAM,GAAS,EACZ,KACC,EAAO,IACP,GAAK,GACL,EAAI,IAAM,EAAa,IAAI,MAAQ,KAIvC,EAAO,UAAU,GAAS,CACxB,AAAI,GACF,GAAG,MAAQ,KAIf,GAAM,GAAS,GAAkB,GAC3B,EAAS,EACb,EAAU,EAAI,SACd,EAAU,EAAI,SAAS,KAAK,GAAM,IAClC,GAEC,KACC,EAAI,IAAM,EAAG,EAAG,QAChB,EAAU,IACV,KAIJ,MAAO,GAAc,CAAC,EAAQ,IAC3B,KACC,EAAI,CAAC,CAAC,EAAO,KAAY,EAAE,QAAO,WAClC,EAAY,IAYX,YACL,EAAsB,CAAE,MAAK,OACyB,CACtD,GAAM,GAAQ,GAAI,GAGlB,SACG,KACC,EAAwB,SACxB,EAAI,CAAC,CAAE,WAAiC,EACtC,KAAM,EACN,KAAM,MAGP,UAAU,EAAI,KAAK,KAAK,IAG7B,EACG,KACC,EAAwB,UAEvB,UAAU,CAAC,CAAE,WAAY,CACxB,AAAI,EACF,IAAU,SAAU,GACpB,EAAG,YAAc,IAEjB,EAAG,YAAc,EAAY,wBAKrC,EAAU,EAAG,KAAO,SACjB,KACC,GAAU,EAAM,KAAK,GAAS,MAE7B,UAAU,IAAM,EAAG,SAGjB,GAAiB,EAAI,CAAE,MAAK,QAChC,KACC,EAAI,GAAS,EAAM,KAAK,IACxB,EAAS,IAAM,EAAM,YACrB,EAAI,GAAU,GAAE,IAAK,GAAO,KCjG3B,YACL,EAAiB,CAAE,OAAqB,CAAE,UACL,CACrC,GAAM,GAAQ,GAAI,GACZ,EAAY,GAAqB,EAAG,eACv
C,KACC,EAAO,UAIL,EAAO,EAAW,wBAAyB,GAC3C,EAAO,EAAW,uBAAwB,GAG1C,EAAS,EACZ,KACC,EAAO,IACP,GAAK,IAIT,SACG,KACC,GAAe,GACf,GAAU,IAET,UAAU,CAAC,CAAC,CAAE,SAAS,CAAE,YAAa,CACrC,GAAI,EACF,OAAQ,EAAM,YAGP,GACH,EAAK,YAAc,EAAY,sBAC/B,UAGG,GACH,EAAK,YAAc,EAAY,qBAC/B,cAIA,EAAK,YAAc,EACjB,sBACA,GAAM,EAAM,aAIlB,GAAK,YAAc,EAAY,+BAKvC,EACG,KACC,EAAI,IAAM,EAAK,UAAY,IAC3B,EAAU,CAAC,CAAE,WAAY,EACvB,EAAG,GAAG,EAAM,MAAM,EAAG,KACrB,EAAG,GAAG,EAAM,MAAM,KACf,KACC,GAAY,GACZ,GAAQ,GACR,EAAU,CAAC,CAAC,KAAW,EAAG,GAAG,QAIlC,UAAU,GAAU,EAAK,YACxB,GAAuB,KAWtB,AAPS,EACb,KACC,EAAO,IACP,EAAI,CAAC,CAAE,UAAW,IAKnB,KACC,EAAI,GAAS,EAAM,KAAK,IACxB,EAAS,IAAM,EAAM,YACrB,EAAI,GAAU,GAAE,IAAK,GAAO,KCxF3B,YACL,EAAkB,CAAE,UACK,CACzB,MAAO,GACJ,KACC,EAAI,CAAC,CAAE,WAAY,CACjB,GAAM,GAAM,KACZ,SAAI,KAAO,GACX,EAAI,aAAa,OAAO,KACxB,EAAI,aAAa,IAAI,IAAK,GACnB,CAAE,UAaV,YACL,EAAuB,EACa,CACpC,GAAM,GAAQ,GAAI,GAClB,SAAM,UAAU,CAAC,CAAE,SAAU,CAC3B,EAAG,aAAa,sBAAuB,EAAG,MAC1C,EAAG,KAAO,GAAG,MAIf,EAAU,EAAI,SACX,UAAU,GAAM,EAAG,kBAGf,GAAiB,EAAI,GACzB,KACC,EAAI,GAAS,EAAM,KAAK,IACxB,EAAS,IAAM,EAAM,YACrB,EAAI,GAAU,GAAE,IAAK,GAAO,KCpC3B,YACL,EAAiB,CAAE,OAAqB,CAAE,aACJ,CACtC,GAAM,GAAQ,GAAI,GAGZ,EAAS,GAAoB,gBAC7B,EAAS,EACb,EAAU,EAAO,WACjB,EAAU,EAAO,UAEhB,KACC,GAAU,IACV,EAAI,IAAM,EAAM,OAChB,KAIJ,SACG,KACC,GAAkB,GAClB,EAAI,CAAC,CAAC,CAAE,eAAe,KAAW,CAChC,GAAM,GAAQ,EAAM,MAAM,YAC1B,GAAI,kBAAa,SAAU,EAAM,EAAM,OAAS,GAAI,CAClD,GAAM,GAAO,EAAY,EAAY,OAAS,GAC9C,AAAI,EAAK,WAAW,EAAM,EAAM,OAAS,KACvC,GAAM,EAAM,OAAS,GAAK,OAE5B,GAAM,OAAS,EAEjB,MAAO,MAGR,UAAU,GAAS,EAAG,UAAY,EAChC,KAAK,IACL,QAAQ,MAAO,WAItB,EACG,KACC,EAAO,CAAC,CAAE,UAAW,IAAS,WAE7B,UAAU,GAAO,CAChB,OAAQ,EAAI,UAGL,aACH,AACE,EAAG,UAAU,QACb,EAAM,iBAAmB,EAAM,MAAM,QAErC,GAAM,MAAQ,EAAG,WACnB,SAYH,AAPS,EACb,KACC,EAAO,IACP,EAAI,CAAC,CAAE,UAAW,IAKnB,KACC,EAAI,GAAS,EAAM,KAAK,IACxB,EAAS,IAAM,EAAM,YACrB,EAAI,IAAO,EAAE,IAAK,MC5CjB,YACL,EAAiB,CAAE,SAAQ,aACI,CAC/B,GAAM,GAAS,KACf,GAAI,CACF,GAAM,GAAM,gCAAU,SAAU,EAAO,OACjC,EAAS,GAAkB,EAAK,GAGhC,EAAS,GAAoB,eAAgB,GAC7C,EAAS,GAAoB,gBAAiB,GAG9C,CAAE,MAAK,OAAQ,EACrB,EACG,
KACC,EAAO,IACP,GAAO,EAAI,KAAK,EAAO,MACvB,GAAK,IAEJ,UAAU,EAAI,KAAK,KAAK,IAG7B,EACG,KACC,EAAO,CAAC,CAAE,UAAW,IAAS,WAE7B,UAAU,GAAO,CAChB,GAAM,GAAS,KACf,OAAQ,EAAI,UAGL,QACH,GAAI,IAAW,EAAO,CACpB,GAAM,GAAU,GAAI,KACpB,OAAW,KAAU,GACnB,sBAAuB,GACtB,CACD,GAAM,GAAU,EAAO,kBACvB,EAAQ,IAAI,EAAQ,WAClB,EAAQ,aAAa,mBAKzB,GAAI,EAAQ,KAAM,CAChB,GAAM,CAAC,CAAC,IAAS,CAAC,GAAG,GAAS,KAAK,CAAC,CAAC,CAAE,GAAI,CAAC,CAAE,KAAO,EAAI,GACzD,EAAK,QAIP,EAAI,QAEN,UAGG,aACA,MACH,GAAU,SAAU,IACpB,EAAM,OACN,UAGG,cACA,YACH,GAAI,MAAO,IAAW,YACpB,EAAM,YACD,CACL,GAAM,GAAM,CAAC,EAAO,GAAG,EACrB,wDACA,IAEI,EAAI,KAAK,IAAI,EACjB,MAAK,IAAI,EAAG,EAAI,QAAQ,IAAW,EAAI,OACrC,GAAI,OAAS,UAAY,GAAK,IAE9B,EAAI,QACR,EAAI,GAAG,QAIT,EAAI,QACJ,cAIA,AAAI,IAAU,MACZ,EAAM,WAKlB,EACG,KACC,EAAO,CAAC,CAAE,UAAW,IAAS,WAE7B,UAAU,GAAO,CAChB,OAAQ,EAAI,UAGL,QACA,QACA,IACH,EAAM,QACN,EAAM,SAGN,EAAI,QACJ,SAKV,GAAM,GAAU,GAAiB,EAAO,GAClC,EAAU,GAAkB,EAAQ,EAAQ,CAAE,WACpD,MAAO,GAAM,EAAQ,GAClB,KACC,GAGE,GAAG,GAAqB,eAAgB,GACrC,IAAI,GAAS,GAAiB,EAAO,CAAE,YAG1C,GAAG,GAAqB,iBAAkB,GACvC,IAAI,GAAS,GAAmB,EAAO,EAAQ,CAAE,uBAKnD,EAAP,CACA,SAAG,OAAS,GACL,ICpKJ,YACL,EAAiB,CAAE,SAAQ,aACa,CACxC,MAAO,GAAc,CACnB,EACA,EACG,KACC,EAAU,MACV,EAAO,GAAO,CAAC,CAAC,EAAI,aAAa,IAAI,SAGxC,KACC,EAAI,CAAC,CAAC,EAAO,KAAS,GAAuB,EAAM,OAAQ,IACzD,EAAI,aAAa,IAAI,OAEvB,EAAI,GAAM,CA1FhB,MA2FQ,GAAM,GAAQ,GAAI,KAGZ,EAAK,SAAS,mBAAmB,EAAI,WAAW,WACtD,OAAS,GAAO,EAAG,WAAY,EAAM,EAAO,EAAG,WAC7C,GAAI,KAAK,gBAAL,cAAoB,aAAc,CACpC,GAAM,GAAW,EAAK,YAChB,EAAW,EAAG,GACpB,AAAI,EAAS,OAAS,EAAS,QAC7B,EAAM,IAAI,EAAmB,GAKnC,OAAW,CAAC,EAAM,IAAS,GAAO,CAChC,GAAM,CAAE,cAAe,EAAE,OAAQ,KAAM,GACvC,EAAK,YAAY,GAAG,MAAM,KAAK,IAIjC,MAAO,CAAE,IAAK,EAAI,YCfnB,YACL,EAAiB,CAAE,YAAW,SACT,CACrB,GAAM,GAAS,EAAG,cACZ,EACJ,EAAO,UACP,EAAO,cAAe,UAGxB,MAAO,GAAc,CAAC,EAAO,IAC1B,KACC,EAAI,CAAC,CAAC,CAAE,SAAQ,UAAU,CAAE,OAAQ,CAAE,SACpC,GAAS,EACL,KAAK,IAAI,EAAQ,KAAK,IAAI,EAAG,EAAI,IACjC,EACG,CACL,SACA,OAAQ,GAAK,EAAS,KAG1B,EAAqB,CAAC,EAAG,IACvB,EAAE,SAAW,EAAE,QACf,EAAE,SAAW,EAAE,SA0BhB,YACL,EAAiB,EACe,CADf,QAAE,YAAF,EAAc,KAAd,EAAc,CAAZ,YAEnB,G
AAM,GAAQ,EAAW,0BAA2B,GAC9C,CAAE,KAAM,GAAiB,GAC/B,MAAO,GAAM,IAAM,CACjB,GAAM,GAAQ,GAAI,GAClB,SACG,KACC,GAAU,EAAG,IACb,GAAe,IAEd,UAAU,CAGT,KAAK,CAAC,CAAE,UAAU,CAAE,OAAQ,IAAW,CACrC,EAAM,MAAM,OAAS,GAAG,EAAS,EAAI,MACrC,EAAG,MAAM,IAAY,GAAG,OAI1B,UAAW,CACT,EAAM,MAAM,OAAS,GACrB,EAAG,MAAM,IAAY,MAKtB,GAAa,EAAI,GACrB,KACC,EAAI,GAAS,EAAM,KAAK,IACxB,EAAS,IAAM,EAAM,YACrB,EAAI,GAAU,GAAE,IAAK,GAAO,OCvH7B,YACL,EAAc,EACW,CACzB,GAAI,MAAO,IAAS,YAAa,CAC/B,GAAM,GAAM,gCAAgC,KAAQ,IACpD,MAAO,IAGL,GAAqB,GAAG,qBACrB,KACC,EAAI,GAAY,EACd,QAAS,EAAQ,YAEnB,GAAe,KAInB,GAAkB,GACf,KACC,EAAI,GAAS,EACX,MAAO,EAAK,iBACZ,MAAO,EAAK,eAEd,GAAe,MAGlB,KACC,EAAI,CAAC,CAAC,EAAS,KAAW,OAAK,GAAY,SAI1C,CACL,GAAM,GAAM,gCAAgC,IAC5C,MAAO,IAAkB,GACtB,KACC,EAAI,GAAS,EACX,aAAc,EAAK,gBAErB,GAAe,MClDhB,YACL,EAAc,EACW,CACzB,GAAM,GAAM,WAAW,qBAAwB,mBAAmB,KAClE,MAAO,IAA2B,GAC/B,KACC,EAAI,CAAC,CAAE,aAAY,iBAAmB,EACpC,MAAO,EACP,MAAO,KAET,GAAe,KCYd,YACL,EACyB,CACzB,GAAM,CAAC,GAAQ,EAAI,MAAM,sBAAwB,GACjD,OAAQ,EAAK,mBAGN,SACH,GAAM,CAAC,CAAE,EAAM,GAAQ,EAAI,MAAM,uCACjC,MAAO,IAA2B,EAAM,OAGrC,SACH,GAAM,CAAC,CAAE,EAAM,GAAQ,EAAI,MAAM,sCACjC,MAAO,IAA2B,EAAM,WAIxC,MAAO,ICtBb,GAAI,IAgBG,YACL,EACoB,CACpB,MAAO,SAAW,EAAM,IAAM,CAC5B,GAAM,GAAS,SAAsB,WAAY,gBACjD,MAAI,GACK,EAAG,GAEH,GAAiB,EAAG,MACxB,KACC,EAAI,GAAS,SAAS,WAAY,EAAO,oBAG9C,KACC,GAAW,IAAM,GACjB,EAAO,GAAS,OAAO,KAAK,GAAO,OAAS,GAC5C,EAAI,GAAU,EAAE,WAChB,EAAY,KAWX,YACL,EAC+B,CAC/B,GAAM,GAAQ,EAAW,uBAAwB,GACjD,MAAO,GAAM,IAAM,CACjB,GAAM,GAAQ,GAAI,GAClB,SAAM,UAAU,CAAC,CAAE,WAAY,CAC7B,EAAM,YAAY,GAAkB,IACpC,EAAM,aAAa,gBAAiB,UAI/B,GAAY,GAChB,KACC,EAAI,GAAS,EAAM,KAAK,IACxB,EAAS,IAAM,EAAM,YACrB,EAAI,GAAU,GAAE,IAAK,GAAO,OCpC7B,YACL,EAAiB,CAAE,YAAW,WACZ,CAClB,MAAO,IAAiB,SAAS,MAC9B,KACC,EAAU,IAAM,GAAgB,EAAI,CAAE,UAAS,eAC/C,EAAI,CAAC,CAAE,OAAQ,CAAE,QACR,EACL,OAAQ,GAAK,MAGjB,EAAwB,WAevB,YACL,EAAiB,EACY,CAC7B,MAAO,GAAM,IAAM,CACjB,GAAM,GAAQ,GAAI,GAClB,SAAM,UAAU,CAGd,KAAK,CAAE,UAAU,CACf,AAAI,EACF,EAAG,aAAa,gBAAiB,UAEjC,EAAG,gBAAgB,kBAIvB,UAAW,CACT,EAAG,gBAAgB,oBAMrB,IAAQ,0BACJ,EAAG,CAAE,OAAQ,KACb,GAAU,EAAI,IAE
jB,KACC,EAAI,GAAS,EAAM,KAAK,IACxB,EAAS,IAAM,EAAM,YACrB,EAAI,GAAU,GAAE,IAAK,GAAO,OChC7B,YACL,EAAiB,CAAE,YAAW,WACD,CAC7B,GAAM,GAAQ,GAAI,KAGZ,EAAU,EAA+B,cAAe,GAC9D,OAAW,KAAU,GAAS,CAC5B,GAAM,GAAK,mBAAmB,EAAO,KAAK,UAAU,IAC9C,EAAS,GAAmB,QAAQ,OAC1C,AAAI,MAAO,IAAW,aACpB,EAAM,IAAI,EAAQ,GAItB,GAAM,GAAU,EACb,KACC,EAAI,GAAU,GAAK,EAAO,SAgF9B,MAAO,AA5EY,IAAiB,SAAS,MAC1C,KACC,EAAwB,UAGxB,EAAU,GAAQ,EAAM,IAAM,CAC5B,GAAI,GAA4B,GAChC,MAAO,GAAG,CAAC,GAAG,GAAO,OAAO,CAAC,EAAO,CAAC,EAAQ,KAAY,CACvD,KAAO,EAAK,QAEN,AADS,EAAM,IAAI,EAAK,EAAK,OAAS,IACjC,SAAW,EAAO,SACzB,EAAK,MAOT,GAAI,GAAS,EAAO,UACpB,KAAO,CAAC,GAAU,EAAO,eACvB,EAAS,EAAO,cAChB,EAAS,EAAO,UAIlB,MAAO,GAAM,IACX,CAAC,GAAG,EAAO,CAAC,GAAG,EAAM,IAAS,UAC9B,IAED,GAAI,SAEN,KAGC,EAAI,GAAS,GAAI,KAAI,CAAC,GAAG,GAAO,KAAK,CAAC,CAAC,CAAE,GAAI,CAAC,CAAE,KAAO,EAAI,KAG3D,EAAU,GAAS,EAAc,CAAC,EAAW,IAC1C,KACC,GAAK,CAAC,CAAC,EAAM,GAAO,CAAC,CAAE,OAAQ,CAAE,KAAK,QAAQ,KAAY,CACxD,GAAM,GAAO,EAAI,EAAK,QAAU,KAAK,MAAM,EAAK,QAGhD,KAAO,EAAK,QAAQ,CAClB,GAAM,CAAC,CAAE,GAAU,EAAK,GACxB,GAAI,EAAS,EAAS,GAAK,EACzB,EAAO,CAAC,GAAG,EAAM,EAAK,aAEtB,OAKJ,KAAO,EAAK,QAAQ,CAClB,GAAM,CAAC,CAAE,GAAU,EAAK,EAAK,OAAS,GACtC,GAAI,EAAS,GAAU,GAAK,CAAC,EAC3B,EAAO,CAAC,EAAK,MAAQ,GAAG,OAExB,OAKJ,MAAO,CAAC,EAAM,IACb,CAAC,GAAI,CAAC,GAAG,KACZ,EAAqB,CAAC,EAAG,IACvB,EAAE,KAAO,EAAE,IACX,EAAE,KAAO,EAAE,SAUtB,KACC,EAAI,CAAC,CAAC,EAAM,KAAW,EACrB,KAAM,EAAK,IAAI,CAAC,CAAC,KAAU,GAC3B,KAAM,EAAK,IAAI,CAAC,CAAC,KAAU,MAI7B,EAAU,CAAE,KAAM,GAAI,KAAM,KAC5B,GAAY,EAAG,GACf,EAAI,CAAC,CAAC,EAAG,KAGH,EAAE,KAAK,OAAS,EAAE,KAAK,OAClB,CACL,KAAM,EAAE,KAAK,MAAM,KAAK,IAAI,EAAG,EAAE,KAAK,OAAS,GAAI,EAAE,KAAK,QAC1D,KAAM,IAKD,CACL,KAAM,EAAE,KAAK,MAAM,IACnB,KAAM,EAAE,KAAK,MAAM,EAAG,EAAE,KAAK,OAAS,EAAE,KAAK,WAiBlD,YACL,EAAiB,CAAE,YAAW,WACU,CACxC,MAAO,GAAM,IAAM,CACjB,GAAM,GAAQ,GAAI,GAClB,SAAM,UAAU,CAAC,CAAE,OAAM,UAAW,CAGlC,OAAW,CAAC,IAAW,GACrB,EAAO,gBAAgB,iBACvB,EAAO,UAAU,OACf,wBAKJ,OAAW,CAAC,EAAO,CAAC,KAAY,GAAK,UACnC,EAAO,aAAa,gBAAiB,QACrC,EAAO,UAAU,OACf,uBACA,IAAU,EAAK,OAAS,KAM1B,GAAQ,wBACV,EACG,KACC,GAAU,EAAM,KAAK,GAAS,KAC9B,
EAAwB,UACxB,GAAa,KACb,GAAe,IAEd,UAAU,CAAC,CAAC,CAAE,CAAE,WAAY,CAC3B,GAAM,GAAM,KAGN,EAAS,EAAK,EAAK,OAAS,GAClC,GAAI,GAAU,EAAO,OAAQ,CAC3B,GAAM,CAAC,GAAU,EACX,CAAE,QAAS,GAAI,KAAI,EAAO,MAChC,AAAI,EAAI,OAAS,GACf,GAAI,KAAO,EACX,QAAQ,aAAa,GAAI,GAAI,GAAG,UAKlC,GAAI,KAAO,GACX,QAAQ,aAAa,GAAI,GAAI,GAAG,OAKnC,GAAqB,EAAI,CAAE,YAAW,YAC1C,KACC,EAAI,GAAS,EAAM,KAAK,IACxB,EAAS,IAAM,EAAM,YACrB,EAAI,GAAU,GAAE,IAAK,GAAO,OCxN7B,YACL,EAAkB,CAAE,YAAW,QAAO,WACf,CAGvB,GAAM,GAAa,EAChB,KACC,EAAI,CAAC,CAAE,OAAQ,CAAE,QAAU,GAC3B,GAAY,EAAG,GACf,EAAI,CAAC,CAAC,EAAG,KAAO,EAAI,GAAK,EAAI,GAC7B,KAIE,EAAU,EACb,KACC,EAAI,CAAC,CAAE,YAAa,IAIxB,MAAO,GAAc,CAAC,EAAS,IAC5B,KACC,EAAI,CAAC,CAAC,EAAQ,KAAe,CAAE,IAAU,IACzC,IACA,GAAU,EAAQ,KAAK,GAAK,KAC5B,GAAQ,IACR,GAAO,CAAE,MAAO,MAChB,EAAI,GAAW,EAAE,aAchB,YACL,EAAiB,CAAE,YAAW,UAAS,QAAO,WACZ,CAClC,GAAM,GAAQ,GAAI,GAClB,SAAM,UAAU,CAGd,KAAK,CAAE,UAAU,CACf,AAAI,EACF,GAAG,aAAa,gBAAiB,UACjC,EAAG,aAAa,WAAY,MAC5B,EAAG,QAEH,GAAG,gBAAgB,iBACnB,EAAG,gBAAgB,cAKvB,UAAW,CACT,EAAG,MAAM,IAAM,GACf,EAAG,aAAa,gBAAiB,UACjC,EAAG,gBAAgB,eAKvB,EACG,KACC,GAAU,EAAM,KAAK,GAAQ,GAAI,GAAS,KAC1C,EAAwB,WAEvB,UAAU,CAAC,CAAE,YAAa,CACzB,EAAG,MAAM,IAAM,GAAG,EAAS,SAI1B,GAAe,EAAI,CAAE,YAAW,QAAO,YAC3C,KACC,EAAI,GAAS,EAAM,KAAK,IACxB,EAAS,IAAM,EAAM,YACrB,EAAI,GAAU,GAAE,IAAK,GAAO,KCjH3B,YACL,CAAE,YAAW,WACP,CACN,EACG,KACC,EAAU,IAAM,EAAG,GAAG,EACpB,mCAEF,EAAI,GAAM,CACR,EAAG,cAAgB,GACnB,EAAG,QAAU,KAEf,GAAS,GAAM,EAAU,EAAI,UAC1B,KACC,GAAU,IAAM,EAAG,aAAa,kBAChC,GAAM,KAGV,GAAe,IAEd,UAAU,CAAC,CAAC,EAAI,KAAY,CAC3B,EAAG,gBAAgB,iBACf,GACF,GAAG,QAAU,MC5BvB,aAAkC,CAChC,MAAO,qBAAqB,KAAK,UAAU,WAkBtC,YACL,CAAE,aACI,CACN,EACG,KACC,EAAU,IAAM,EAAG,GAAG,EAAY,yBAClC,EAAI,GAAM,EAAG,gBAAgB,sBAC7B,EAAO,IACP,GAAS,GAAM,EAAU,EAAI,cAC1B,KACC,GAAM,MAIT,UAAU,GAAM,CACf,GAAM,GAAM,EAAG,UAGf,AAAI,IAAQ,EACV,EAAG,UAAY,EAGN,EAAM,EAAG,eAAiB,EAAG,cACtC,GAAG,UAAY,EAAM,KClCxB,YACL,CAAE,YAAW,WACP,CACN,EAAc,CAAC,GAAY,UAAW,IACnC,KACC,EAAI,CAAC,CAAC,EAAQ,KAAY,GAAU,CAAC,GACrC,EAAU,GAAU,EAAG,GACpB,KACC,GAAM,EAAS,IAAM,OAGzB,GAAe,IAEd,UAAU,CAAC,CAAC,EAAQ,C
AAE,OAAQ,CAAE,SAAU,CACzC,GAAI,EACF,SAAS,KAAK,aAAa,gBAAiB,QAC5C,SAAS,KAAK,MAAM,IAAM,IAAI,UACzB,CACL,GAAM,GAAQ,GAAK,SAAS,SAAS,KAAK,MAAM,IAAK,IACrD,SAAS,KAAK,gBAAgB,iBAC9B,SAAS,KAAK,MAAM,IAAM,GACtB,GACF,OAAO,SAAS,EAAG,MC1D/B,AAAK,OAAO,SACV,QAAO,QAAU,SAAU,EAAa,CACtC,GAAM,GAA2B,GACjC,OAAW,KAAO,QAAO,KAAK,GAE5B,EAAK,KAAK,CAAC,EAAK,EAAI,KAGtB,MAAO,KAIX,AAAK,OAAO,QACV,QAAO,OAAS,SAAU,EAAa,CACrC,GAAM,GAAiB,GACvB,OAAW,KAAO,QAAO,KAAK,GAE5B,EAAK,KAAK,EAAI,IAGhB,MAAO,KAMX,AAAI,MAAO,UAAY,aAGhB,SAAQ,UAAU,UACrB,SAAQ,UAAU,SAAW,SAC3B,EAA8B,EACxB,CACN,AAAI,MAAO,IAAM,SACf,MAAK,WAAa,EAAE,KACpB,KAAK,UAAY,EAAE,KAEnB,MAAK,WAAa,EAClB,KAAK,UAAY,KAKlB,QAAQ,UAAU,aACrB,SAAQ,UAAU,YAAc,YAC3B,EACG,CACN,GAAM,GAAS,KAAK,WACpB,GAAI,EAAQ,CACV,AAAI,EAAM,SAAW,GACnB,EAAO,YAAY,MAGrB,OAAS,GAAI,EAAM,OAAS,EAAG,GAAK,EAAG,IAAK,CAC1C,GAAI,GAAO,EAAM,GACjB,AAAI,MAAO,IAAS,SAClB,EAAO,SAAS,eAAe,GACxB,EAAK,YACZ,EAAK,WAAW,YAAY,GAG9B,AAAK,EAGH,EAAO,aAAa,KAAK,gBAAkB,GAF3C,EAAO,aAAa,EAAM,W1LEtC,SAAS,gBAAgB,UAAU,OAAO,SAC1C,SAAS,gBAAgB,UAAU,IAAI,MAGvC,GAAM,IAAY,KACZ,GAAY,KACZ,GAAY,KACZ,GAAY,KAGZ,GAAY,KACZ,GAAY,GAAW,sBACvB,GAAY,GAAW,uBACvB,GAAY,KAGZ,GAAS,KACT,GAAS,SAAS,MAAM,UAAU,UACpC,gCAAU,QAAS,GACnB,GAAI,KAAI,2BAA4B,GAAO,OAE3C,GAGE,GAAS,GAAI,GACnB,GAAiB,CAAE,YAGnB,AAAI,GAAQ,uBACV,GAAoB,CAAE,aAAW,aAAW,eAxH9C,OA2HA,AAAI,QAAO,UAAP,eAAgB,YAAa,QAC/B,KAGF,EAAM,GAAW,IACd,KACC,GAAM,MAEL,UAAU,IAAM,CACf,GAAU,SAAU,IACpB,GAAU,SAAU,MAI1B,GACG,KACC,EAAO,CAAC,CAAE,UAAW,IAAS,WAE7B,UAAU,GAAO,CAChB,OAAQ,EAAI,UAGL,QACA,IACH,GAAM,GAAO,GAAmB,oBAChC,AAAI,MAAO,IAAS,aAClB,EAAK,QACP,UAGG,QACA,IACH,GAAM,GAAO,GAAmB,oBAChC,AAAI,MAAO,IAAS,aAClB,EAAK,QACP,SAKV,GAAmB,CAAE,aAAW,aAChC,GAAe,CAAE,eACjB,GAAgB,CAAE,aAAW,aAG7B,GAAM,IAAU,GAAY,GAAoB,UAAW,CAAE,eACvD,GAAQ,GACX,KACC,EAAI,IAAM,GAAoB,SAC9B,EAAU,GAAM,GAAU,EAAI,CAAE,aAAW,cAC3C,EAAY,IAIV,GAAW,EAGf,GAAG,GAAqB,UACrB,IAAI,GAAM,GAAY,EAAI,CAAE,aAG/B,GAAG,GAAqB,UACrB,IAAI,GAAM,GAAY,EAAI,CAAE,aAAW,WAAS,YAGnD,GAAG,GAAqB,WACrB,IAAI,GAAM,GAAa,IAG1B,GAAG,GAAqB,UACrB,IAAI,GAAM,GAAY,EAAI,CAAE,UAAQ,gBAGvC,GAAG,GAAqB,UA
CrB,IAAI,GAAM,GAAY,KAIrB,GAAW,EAAM,IAAM,EAG3B,GAAG,GAAqB,WACrB,IAAI,GAAM,GAAa,EAAI,CAAE,WAAS,aAGzC,GAAG,GAAqB,WACrB,IAAI,GAAM,GAAQ,oBACf,GAAoB,EAAI,CAAE,UAAQ,eAClC,GAIN,GAAG,GAAqB,gBACrB,IAAI,GAAM,GAAiB,EAAI,CAAE,aAAW,cAG/C,GAAG,GAAqB,WACrB,IAAI,GAAM,EAAG,aAAa,kBAAoB,aAC3C,GAAG,GAAS,IAAM,GAAa,EAAI,CAAE,aAAW,WAAS,YACzD,GAAG,GAAS,IAAM,GAAa,EAAI,CAAE,aAAW,WAAS,aAI/D,GAAG,GAAqB,QACrB,IAAI,GAAM,GAAU,EAAI,CAAE,aAAW,cAGxC,GAAG,GAAqB,OACrB,IAAI,GAAM,GAAqB,EAAI,CAAE,aAAW,cAGnD,GAAG,GAAqB,OACrB,IAAI,GAAM,GAAe,EAAI,CAAE,aAAW,WAAS,SAAO,gBAIzD,GAAa,GAChB,KACC,EAAU,IAAM,IAChB,GAAU,IACV,EAAY,IAIhB,GAAW,YAMX,OAAO,UAAa,GACpB,OAAO,UAAa,GACpB,OAAO,QAAa,GACpB,OAAO,UAAa,GACpB,OAAO,UAAa,GACpB,OAAO,QAAa,GACpB,OAAO,QAAa,GACpB,OAAO,OAAa,GACpB,OAAO,OAAa,GACpB,OAAO,WAAa", - "names": [] -} diff --git a/assets/javascripts/lunr/min/lunr.ar.min.js b/assets/javascripts/lunr/min/lunr.ar.min.js deleted file mode 100644 index 248ddc5d14da..000000000000 --- a/assets/javascripts/lunr/min/lunr.ar.min.js +++ /dev/null @@ -1 +0,0 @@ -!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.ar=function(){this.pipeline.reset(),this.pipeline.add(e.ar.trimmer,e.ar.stopWordFilter,e.ar.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.ar.stemmer))},e.ar.wordCharacters="ء-ٛٱـ",e.ar.trimmer=e.trimmerSupport.generateTrimmer(e.ar.wordCharacters),e.Pipeline.registerFunction(e.ar.trimmer,"trimmer-ar"),e.ar.stemmer=function(){var e=this;return e.result=!1,e.preRemoved=!1,e.sufRemoved=!1,e.pre={pre1:"ف ك ب و س ل ن ا ي ت",pre2:"ال لل",pre3:"بال وال فال تال كال ولل",pre4:"فبال كبال وبال وكال"},e.suf={suf1:"ه ك ت ن ا ي",suf2:"نك نه ها وك يا اه ون ين تن تم نا وا ان كم كن ني نن ما هم هن تك ته ات يه",suf3:"تين كهم نيه نهم ونه وها يهم ونا ونك وني وهم تكم تنا تها تني تهم كما كها ناه نكم هنا تان يها",suf4:"كموه ناها ونني ونهم تكما تموه تكاه كماه ناكم ناهم نيها وننا"},e.patterns=JSON.parse('{"pt43":[{"pt":[{"c":"ا","l":1}]},{"pt":[{"c":"ا,ت,ن,ي","l":0}],"mPt":[{"c":"ف","l":0,"m":1},{"c":"ع","l":1,"m":2},{"c":"ل","l":2,"m":3}]},{"pt":[{"c":"و","l":2}],"mPt":[{"c":"ف","l":0,"m":0},{"c":"ع","l":1,"m":1},{"c":"ل","l":2,"m":3}]},{"pt":[{"c":"ا","l":2}]},{"pt":[{"c":"ي","l":2}],"mPt":[{"c":"ف","l":0,"m":0},{"c":"ع","l":1,"m":1},{"c":"ا","l":2},{"c":"ل","l":3,"m":3}]},{"pt":[{"c":"م","l":0}]}],"pt53":[{"pt":[{"c":"ت","l":0},{"c":"ا","l":2}]},{"pt":[{"c":"ا,ن,ت,ي","l":0},{"c":"ت","l":2}],"mPt":[{"c":"ا","l":0},{"c":"ف","l":1,"m":1},{"c":"ت","l":2},{"c":"ع","l":3,"m":3},{"c":"ا","l":4},{"c":"ل","l":5,"m":4}]},{"pt":[{"c":"ا","l":0},{"c":"ا","l":2}],"mPt":[{"c":"ا","l":0},{"c":"ف","l":1,"m":1},{"c":"ع","l":2,"m":3},{"c":"ل","l":3,"m":4},{"c":"ا","l":4},{"c":"ل","l":5,"m":4}]},{"pt":[{"c":"ا","l":0},{"c":"ا","l":3}],"mPt":[{"c":"ف","l":0,"m":1},{"c":"ع","l":1,"m":2},{"c":"ل","l":2,"m":4}]},{"pt":[{"c":"ا","l":3},{"c":"ن","l":4}]},{"pt":[{"c":"ت","l":0},{"c":"ي","l":3}]},{"pt":[{"c":"م","l":0},{"c":"و","l":3}]},{"pt":[{"c":"ا","l":1},{"c":"و","l":3}]},{"pt":[{"c":"و","l":1},{"c":"ا",
"l":2}]},{"pt":[{"c":"م","l":0},{"c":"ا","l":3}]},{"pt":[{"c":"م","l":0},{"c":"ي","l":3}]},{"pt":[{"c":"ا","l":2},{"c":"ن","l":3}]},{"pt":[{"c":"م","l":0},{"c":"ن","l":1}],"mPt":[{"c":"ا","l":0},{"c":"ن","l":1},{"c":"ف","l":2,"m":2},{"c":"ع","l":3,"m":3},{"c":"ا","l":4},{"c":"ل","l":5,"m":4}]},{"pt":[{"c":"م","l":0},{"c":"ت","l":2}],"mPt":[{"c":"ا","l":0},{"c":"ف","l":1,"m":1},{"c":"ت","l":2},{"c":"ع","l":3,"m":3},{"c":"ا","l":4},{"c":"ل","l":5,"m":4}]},{"pt":[{"c":"م","l":0},{"c":"ا","l":2}]},{"pt":[{"c":"م","l":1},{"c":"ا","l":3}]},{"pt":[{"c":"ي,ت,ا,ن","l":0},{"c":"ت","l":1}],"mPt":[{"c":"ف","l":0,"m":2},{"c":"ع","l":1,"m":3},{"c":"ا","l":2},{"c":"ل","l":3,"m":4}]},{"pt":[{"c":"ت,ي,ا,ن","l":0},{"c":"ت","l":2}],"mPt":[{"c":"ا","l":0},{"c":"ف","l":1,"m":1},{"c":"ت","l":2},{"c":"ع","l":3,"m":3},{"c":"ا","l":4},{"c":"ل","l":5,"m":4}]},{"pt":[{"c":"ا","l":2},{"c":"ي","l":3}]},{"pt":[{"c":"ا,ي,ت,ن","l":0},{"c":"ن","l":1}],"mPt":[{"c":"ا","l":0},{"c":"ن","l":1},{"c":"ف","l":2,"m":2},{"c":"ع","l":3,"m":3},{"c":"ا","l":4},{"c":"ل","l":5,"m":4}]},{"pt":[{"c":"ا","l":3},{"c":"ء","l":4}]}],"pt63":[{"pt":[{"c":"ا","l":0},{"c":"ت","l":2},{"c":"ا","l":4}]},{"pt":[{"c":"ا,ت,ن,ي","l":0},{"c":"س","l":1},{"c":"ت","l":2}],"mPt":[{"c":"ا","l":0},{"c":"س","l":1},{"c":"ت","l":2},{"c":"ف","l":3,"m":3},{"c":"ع","l":4,"m":4},{"c":"ا","l":5},{"c":"ل","l":6,"m":5}]},{"pt":[{"c":"ا,ن,ت,ي","l":0},{"c":"و","l":3}]},{"pt":[{"c":"م","l":0},{"c":"س","l":1},{"c":"ت","l":2}],"mPt":[{"c":"ا","l":0},{"c":"س","l":1},{"c":"ت","l":2},{"c":"ف","l":3,"m":3},{"c":"ع","l":4,"m":4},{"c":"ا","l":5},{"c":"ل","l":6,"m":5}]},{"pt":[{"c":"ي","l":1},{"c":"ي","l":3},{"c":"ا","l":4},{"c":"ء","l":5}]},{"pt":[{"c":"ا","l":0},{"c":"ن","l":1},{"c":"ا","l":4}]}],"pt54":[{"pt":[{"c":"ت","l":0}]},{"pt":[{"c":"ا,ي,ت,ن","l":0}],"mPt":[{"c":"ا","l":0},{"c":"ف","l":1,"m":1},{"c":"ع","l":2,"m":2},{"c":"ل","l":3,"m":3},{"c":"ر","l":4,"m":4},{"c":"ا","l":5},{"c":"ر","l":6,"m":4}]},{"pt":[{"c":"م","l":0}],"mPt":[{"c":"ا","l":0},{"
c":"ف","l":1,"m":1},{"c":"ع","l":2,"m":2},{"c":"ل","l":3,"m":3},{"c":"ر","l":4,"m":4},{"c":"ا","l":5},{"c":"ر","l":6,"m":4}]},{"pt":[{"c":"ا","l":2}]},{"pt":[{"c":"ا","l":0},{"c":"ن","l":2}]}],"pt64":[{"pt":[{"c":"ا","l":0},{"c":"ا","l":4}]},{"pt":[{"c":"م","l":0},{"c":"ت","l":1}]}],"pt73":[{"pt":[{"c":"ا","l":0},{"c":"س","l":1},{"c":"ت","l":2},{"c":"ا","l":5}]}],"pt75":[{"pt":[{"c":"ا","l":0},{"c":"ا","l":5}]}]}'),e.execArray=["cleanWord","removeDiacritics","cleanAlef","removeStopWords","normalizeHamzaAndAlef","removeStartWaw","removePre432","removeEndTaa","wordCheck"],e.stem=function(){var r=0;for(e.result=!1,e.preRemoved=!1,e.sufRemoved=!1;r=0)return!0},e.normalizeHamzaAndAlef=function(){return e.word=e.word.replace("ؤ","ء"),e.word=e.word.replace("ئ","ء"),e.word=e.word.replace(/([\u0627])\1+/gi,"ا"),!1},e.removeEndTaa=function(){return!(e.word.length>2)||(e.word=e.word.replace(/[\u0627]$/,""),e.word=e.word.replace("ة",""),!1)},e.removeStartWaw=function(){return e.word.length>3&&"و"==e.word[0]&&"و"==e.word[1]&&(e.word=e.word.slice(1)),!1},e.removePre432=function(){var r=e.word;if(e.word.length>=7){var t=new RegExp("^("+e.pre.pre4.split(" ").join("|")+")");e.word=e.word.replace(t,"")}if(e.word==r&&e.word.length>=6){var c=new RegExp("^("+e.pre.pre3.split(" ").join("|")+")");e.word=e.word.replace(c,"")}if(e.word==r&&e.word.length>=5){var l=new RegExp("^("+e.pre.pre2.split(" ").join("|")+")");e.word=e.word.replace(l,"")}return r!=e.word&&(e.preRemoved=!0),!1},e.patternCheck=function(r){for(var t=0;t3){var t=new RegExp("^("+e.pre.pre1.split(" ").join("|")+")");e.word=e.word.replace(t,"")}return r!=e.word&&(e.preRemoved=!0),!1},e.removeSuf1=function(){var r=e.word;if(0==e.sufRemoved&&e.word.length>3){var t=new RegExp("("+e.suf.suf1.split(" ").join("|")+")$");e.word=e.word.replace(t,"")}return r!=e.word&&(e.sufRemoved=!0),!1},e.removeSuf432=function(){var r=e.word;if(e.word.length>=6){var t=new RegExp("("+e.suf.suf4.split(" 
").join("|")+")$");e.word=e.word.replace(t,"")}if(e.word==r&&e.word.length>=5){var c=new RegExp("("+e.suf.suf3.split(" ").join("|")+")$");e.word=e.word.replace(c,"")}if(e.word==r&&e.word.length>=4){var l=new RegExp("("+e.suf.suf2.split(" ").join("|")+")$");e.word=e.word.replace(l,"")}return r!=e.word&&(e.sufRemoved=!0),!1},e.wordCheck=function(){for(var r=(e.word,[e.removeSuf432,e.removeSuf1,e.removePre1]),t=0,c=!1;e.word.length>=7&&!e.result&&t=f.limit)return;f.cursor++}for(;!f.out_grouping(w,97,248);){if(f.cursor>=f.limit)return;f.cursor++}d=f.cursor,d=d&&(r=f.limit_backward,f.limit_backward=d,f.ket=f.cursor,e=f.find_among_b(c,32),f.limit_backward=r,e))switch(f.bra=f.cursor,e){case 1:f.slice_del();break;case 2:f.in_grouping_b(p,97,229)&&f.slice_del()}}function t(){var e,r=f.limit-f.cursor;f.cursor>=d&&(e=f.limit_backward,f.limit_backward=d,f.ket=f.cursor,f.find_among_b(l,4)?(f.bra=f.cursor,f.limit_backward=e,f.cursor=f.limit-r,f.cursor>f.limit_backward&&(f.cursor--,f.bra=f.cursor,f.slice_del())):f.limit_backward=e)}function s(){var e,r,i,n=f.limit-f.cursor;if(f.ket=f.cursor,f.eq_s_b(2,"st")&&(f.bra=f.cursor,f.eq_s_b(2,"ig")&&f.slice_del()),f.cursor=f.limit-n,f.cursor>=d&&(r=f.limit_backward,f.limit_backward=d,f.ket=f.cursor,e=f.find_among_b(m,5),f.limit_backward=r,e))switch(f.bra=f.cursor,e){case 1:f.slice_del(),i=f.limit-f.cursor,t(),f.cursor=f.limit-i;break;case 2:f.slice_from("løs")}}function o(){var e;f.cursor>=d&&(e=f.limit_backward,f.limit_backward=d,f.ket=f.cursor,f.out_grouping_b(w,97,248)?(f.bra=f.cursor,u=f.slice_to(u),f.limit_backward=e,f.eq_v_b(u)&&f.slice_del()):f.limit_backward=e)}var a,d,u,c=[new r("hed",-1,1),new r("ethed",0,1),new r("ered",-1,1),new r("e",-1,1),new r("erede",3,1),new r("ende",3,1),new r("erende",5,1),new r("ene",3,1),new r("erne",3,1),new r("ere",3,1),new r("en",-1,1),new r("heden",10,1),new r("eren",10,1),new r("er",-1,1),new r("heder",13,1),new r("erer",13,1),new r("s",-1,2),new r("heds",16,1),new r("es",16,1),new 
r("endes",18,1),new r("erendes",19,1),new r("enes",18,1),new r("ernes",18,1),new r("eres",18,1),new r("ens",16,1),new r("hedens",24,1),new r("erens",24,1),new r("ers",16,1),new r("ets",16,1),new r("erets",28,1),new r("et",-1,1),new r("eret",30,1)],l=[new r("gd",-1,-1),new r("dt",-1,-1),new r("gt",-1,-1),new r("kt",-1,-1)],m=[new r("ig",-1,1),new r("lig",0,1),new r("elig",1,1),new r("els",-1,1),new r("løst",-1,2)],w=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,48,0,128],p=[239,254,42,3,0,0,0,0,0,0,0,0,0,0,0,0,16],f=new i;this.setCurrent=function(e){f.setCurrent(e)},this.getCurrent=function(){return f.getCurrent()},this.stem=function(){var r=f.cursor;return e(),f.limit_backward=r,f.cursor=f.limit,n(),f.cursor=f.limit,t(),f.cursor=f.limit,s(),f.cursor=f.limit,o(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return n.setCurrent(e),n.stem(),n.getCurrent()}):(n.setCurrent(e),n.stem(),n.getCurrent())}}(),e.Pipeline.registerFunction(e.da.stemmer,"stemmer-da"),e.da.stopWordFilter=e.generateStopWordFilter("ad af alle alt anden at blev blive bliver da de dem den denne der deres det dette dig din disse dog du efter eller en end er et for fra ham han hans har havde have hende hendes her hos hun hvad hvis hvor i ikke ind jeg jer jo kunne man mange med meget men mig min mine mit mod ned noget nogle nu når og også om op os over på selv sig sin sine sit skal skulle som sådan thi til ud under var vi vil ville vor være været".split(" ")),e.Pipeline.registerFunction(e.da.stopWordFilter,"stopWordFilter-da")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.de.min.js b/assets/javascripts/lunr/min/lunr.de.min.js deleted file mode 100644 index f3b5c108c9ee..000000000000 --- a/assets/javascripts/lunr/min/lunr.de.min.js +++ /dev/null @@ -1,18 +0,0 @@ -/*! - * Lunr languages, `German` language - * https://github.com/MihaiValentin/lunr-languages - * - * Copyright 2014, Mihai Valentin - * http://www.mozilla.org/MPL/ - */ -/*! 
- * based on - * Snowball JavaScript Library v0.3 - * http://code.google.com/p/urim/ - * http://snowball.tartarus.org/ - * - * Copyright 2010, Oleg Mazko - * http://www.mozilla.org/MPL/ - */ - -!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.de=function(){this.pipeline.reset(),this.pipeline.add(e.de.trimmer,e.de.stopWordFilter,e.de.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.de.stemmer))},e.de.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.de.trimmer=e.trimmerSupport.generateTrimmer(e.de.wordCharacters),e.Pipeline.registerFunction(e.de.trimmer,"trimmer-de"),e.de.stemmer=function(){var r=e.stemmerSupport.Among,n=e.stemmerSupport.SnowballProgram,i=new function(){function e(e,r,n){return!(!v.eq_s(1,e)||(v.ket=v.cursor,!v.in_grouping(p,97,252)))&&(v.slice_from(r),v.cursor=n,!0)}function i(){for(var r,n,i,s,t=v.cursor;;)if(r=v.cursor,v.bra=r,v.eq_s(1,"ß"))v.ket=v.cursor,v.slice_from("ss");else{if(r>=v.limit)break;v.cursor=r+1}for(v.cursor=t;;)for(n=v.cursor;;){if(i=v.cursor,v.in_grouping(p,97,252)){if(s=v.cursor,v.bra=s,e("u","U",i))break;if(v.cursor=s,e("y","Y",i))break}if(i>=v.limit)return void(v.cursor=n);v.cursor=i+1}}function s(){for(;!v.in_grouping(p,97,252);){if(v.cursor>=v.limit)return!0;v.cursor++}for(;!v.out_grouping(p,97,252);){if(v.cursor>=v.limit)return!0;v.cursor++}return!1}function t(){m=v.limit,l=m;var e=v.cursor+3;0<=e&&e<=v.limit&&(d=e,s()||(m=v.cursor,m=v.limit)return;v.cursor++}}}function c(){return m<=v.cursor}function u(){return l<=v.cursor}function a(){var 
e,r,n,i,s=v.limit-v.cursor;if(v.ket=v.cursor,(e=v.find_among_b(w,7))&&(v.bra=v.cursor,c()))switch(e){case 1:v.slice_del();break;case 2:v.slice_del(),v.ket=v.cursor,v.eq_s_b(1,"s")&&(v.bra=v.cursor,v.eq_s_b(3,"nis")&&v.slice_del());break;case 3:v.in_grouping_b(g,98,116)&&v.slice_del()}if(v.cursor=v.limit-s,v.ket=v.cursor,(e=v.find_among_b(f,4))&&(v.bra=v.cursor,c()))switch(e){case 1:v.slice_del();break;case 2:if(v.in_grouping_b(k,98,116)){var t=v.cursor-3;v.limit_backward<=t&&t<=v.limit&&(v.cursor=t,v.slice_del())}}if(v.cursor=v.limit-s,v.ket=v.cursor,(e=v.find_among_b(_,8))&&(v.bra=v.cursor,u()))switch(e){case 1:v.slice_del(),v.ket=v.cursor,v.eq_s_b(2,"ig")&&(v.bra=v.cursor,r=v.limit-v.cursor,v.eq_s_b(1,"e")||(v.cursor=v.limit-r,u()&&v.slice_del()));break;case 2:n=v.limit-v.cursor,v.eq_s_b(1,"e")||(v.cursor=v.limit-n,v.slice_del());break;case 3:if(v.slice_del(),v.ket=v.cursor,i=v.limit-v.cursor,!v.eq_s_b(2,"er")&&(v.cursor=v.limit-i,!v.eq_s_b(2,"en")))break;v.bra=v.cursor,c()&&v.slice_del();break;case 4:v.slice_del(),v.ket=v.cursor,e=v.find_among_b(b,2),e&&(v.bra=v.cursor,u()&&1==e&&v.slice_del())}}var d,l,m,h=[new r("",-1,6),new r("U",0,2),new r("Y",0,1),new r("ä",0,3),new r("ö",0,4),new r("ü",0,5)],w=[new r("e",-1,2),new r("em",-1,1),new r("en",-1,2),new r("ern",-1,1),new r("er",-1,1),new r("s",-1,3),new r("es",5,2)],f=[new r("en",-1,1),new r("er",-1,1),new r("st",-1,2),new r("est",2,1)],b=[new r("ig",-1,1),new r("lich",-1,1)],_=[new r("end",-1,1),new r("ig",-1,2),new r("ung",-1,1),new r("lich",-1,3),new r("isch",-1,2),new r("ik",-1,2),new r("heit",-1,3),new r("keit",-1,4)],p=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,8,0,32,8],g=[117,30,5],k=[117,30,4],v=new n;this.setCurrent=function(e){v.setCurrent(e)},this.getCurrent=function(){return v.getCurrent()},this.stem=function(){var e=v.cursor;return i(),v.cursor=e,t(),v.limit_backward=e,v.cursor=v.limit,a(),v.cursor=v.limit_backward,o(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return 
i.setCurrent(e),i.stem(),i.getCurrent()}):(i.setCurrent(e),i.stem(),i.getCurrent())}}(),e.Pipeline.registerFunction(e.de.stemmer,"stemmer-de"),e.de.stopWordFilter=e.generateStopWordFilter("aber alle allem allen aller alles als also am an ander andere anderem anderen anderer anderes anderm andern anderr anders auch auf aus bei bin bis bist da damit dann das dasselbe dazu daß dein deine deinem deinen deiner deines dem demselben den denn denselben der derer derselbe derselben des desselben dessen dich die dies diese dieselbe dieselben diesem diesen dieser dieses dir doch dort du durch ein eine einem einen einer eines einig einige einigem einigen einiger einiges einmal er es etwas euch euer eure eurem euren eurer eures für gegen gewesen hab habe haben hat hatte hatten hier hin hinter ich ihm ihn ihnen ihr ihre ihrem ihren ihrer ihres im in indem ins ist jede jedem jeden jeder jedes jene jenem jenen jener jenes jetzt kann kein keine keinem keinen keiner keines können könnte machen man manche manchem manchen mancher manches mein meine meinem meinen meiner meines mich mir mit muss musste nach nicht nichts noch nun nur ob oder ohne sehr sein seine seinem seinen seiner seines selbst sich sie sind so solche solchem solchen solcher solches soll sollte sondern sonst um und uns unse unsem unsen unser unses unter viel vom von vor war waren warst was weg weil weiter welche welchem welchen welcher welches wenn werde werden wie wieder will wir wird wirst wo wollen wollte während würde würden zu zum zur zwar zwischen über".split(" ")),e.Pipeline.registerFunction(e.de.stopWordFilter,"stopWordFilter-de")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.du.min.js b/assets/javascripts/lunr/min/lunr.du.min.js deleted file mode 100644 index 49a0f3f0ac17..000000000000 --- a/assets/javascripts/lunr/min/lunr.du.min.js +++ /dev/null @@ -1,18 +0,0 @@ -/*! 
- * Lunr languages, `Dutch` language - * https://github.com/MihaiValentin/lunr-languages - * - * Copyright 2014, Mihai Valentin - * http://www.mozilla.org/MPL/ - */ -/*! - * based on - * Snowball JavaScript Library v0.3 - * http://code.google.com/p/urim/ - * http://snowball.tartarus.org/ - * - * Copyright 2010, Oleg Mazko - * http://www.mozilla.org/MPL/ - */ - -!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");console.warn('[Lunr Languages] Please use the "nl" instead of the "du". The "nl" code is the standard code for Dutch language, and "du" will be removed in the next major versions.'),e.du=function(){this.pipeline.reset(),this.pipeline.add(e.du.trimmer,e.du.stopWordFilter,e.du.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.du.stemmer))},e.du.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.du.trimmer=e.trimmerSupport.generateTrimmer(e.du.wordCharacters),e.Pipeline.registerFunction(e.du.trimmer,"trimmer-du"),e.du.stemmer=function(){var r=e.stemmerSupport.Among,i=e.stemmerSupport.SnowballProgram,n=new function(){function e(){for(var e,r,i,o=C.cursor;;){if(C.bra=C.cursor,e=C.find_among(b,11))switch(C.ket=C.cursor,e){case 1:C.slice_from("a");continue;case 2:C.slice_from("e");continue;case 3:C.slice_from("i");continue;case 4:C.slice_from("o");continue;case 5:C.slice_from("u");continue;case 
6:if(C.cursor>=C.limit)break;C.cursor++;continue}break}for(C.cursor=o,C.bra=o,C.eq_s(1,"y")?(C.ket=C.cursor,C.slice_from("Y")):C.cursor=o;;)if(r=C.cursor,C.in_grouping(q,97,232)){if(i=C.cursor,C.bra=i,C.eq_s(1,"i"))C.ket=C.cursor,C.in_grouping(q,97,232)&&(C.slice_from("I"),C.cursor=r);else if(C.cursor=i,C.eq_s(1,"y"))C.ket=C.cursor,C.slice_from("Y"),C.cursor=r;else if(n(r))break}else if(n(r))break}function n(e){return C.cursor=e,e>=C.limit||(C.cursor++,!1)}function o(){_=C.limit,f=_,t()||(_=C.cursor,_<3&&(_=3),t()||(f=C.cursor))}function t(){for(;!C.in_grouping(q,97,232);){if(C.cursor>=C.limit)return!0;C.cursor++}for(;!C.out_grouping(q,97,232);){if(C.cursor>=C.limit)return!0;C.cursor++}return!1}function s(){for(var e;;)if(C.bra=C.cursor,e=C.find_among(p,3))switch(C.ket=C.cursor,e){case 1:C.slice_from("y");break;case 2:C.slice_from("i");break;case 3:if(C.cursor>=C.limit)return;C.cursor++}}function u(){return _<=C.cursor}function c(){return f<=C.cursor}function a(){var e=C.limit-C.cursor;C.find_among_b(g,3)&&(C.cursor=C.limit-e,C.ket=C.cursor,C.cursor>C.limit_backward&&(C.cursor--,C.bra=C.cursor,C.slice_del()))}function l(){var e;w=!1,C.ket=C.cursor,C.eq_s_b(1,"e")&&(C.bra=C.cursor,u()&&(e=C.limit-C.cursor,C.out_grouping_b(q,97,232)&&(C.cursor=C.limit-e,C.slice_del(),w=!0,a())))}function m(){var e;u()&&(e=C.limit-C.cursor,C.out_grouping_b(q,97,232)&&(C.cursor=C.limit-e,C.eq_s_b(3,"gem")||(C.cursor=C.limit-e,C.slice_del(),a())))}function d(){var e,r,i,n,o,t,s=C.limit-C.cursor;if(C.ket=C.cursor,e=C.find_among_b(h,5))switch(C.bra=C.cursor,e){case 1:u()&&C.slice_from("heid");break;case 2:m();break;case 3:u()&&C.out_grouping_b(z,97,232)&&C.slice_del()}if(C.cursor=C.limit-s,l(),C.cursor=C.limit-s,C.ket=C.cursor,C.eq_s_b(4,"heid")&&(C.bra=C.cursor,c()&&(r=C.limit-C.cursor,C.eq_s_b(1,"c")||(C.cursor=C.limit-r,C.slice_del(),C.ket=C.cursor,C.eq_s_b(2,"en")&&(C.bra=C.cursor,m())))),C.cursor=C.limit-s,C.ket=C.cursor,e=C.find_among_b(k,6))switch(C.bra=C.cursor,e){case 
1:if(c()){if(C.slice_del(),i=C.limit-C.cursor,C.ket=C.cursor,C.eq_s_b(2,"ig")&&(C.bra=C.cursor,c()&&(n=C.limit-C.cursor,!C.eq_s_b(1,"e")))){C.cursor=C.limit-n,C.slice_del();break}C.cursor=C.limit-i,a()}break;case 2:c()&&(o=C.limit-C.cursor,C.eq_s_b(1,"e")||(C.cursor=C.limit-o,C.slice_del()));break;case 3:c()&&(C.slice_del(),l());break;case 4:c()&&C.slice_del();break;case 5:c()&&w&&C.slice_del()}C.cursor=C.limit-s,C.out_grouping_b(j,73,232)&&(t=C.limit-C.cursor,C.find_among_b(v,4)&&C.out_grouping_b(q,97,232)&&(C.cursor=C.limit-t,C.ket=C.cursor,C.cursor>C.limit_backward&&(C.cursor--,C.bra=C.cursor,C.slice_del())))}var f,_,w,b=[new r("",-1,6),new r("á",0,1),new r("ä",0,1),new r("é",0,2),new r("ë",0,2),new r("í",0,3),new r("ï",0,3),new r("ó",0,4),new r("ö",0,4),new r("ú",0,5),new r("ü",0,5)],p=[new r("",-1,3),new r("I",0,2),new r("Y",0,1)],g=[new r("dd",-1,-1),new r("kk",-1,-1),new r("tt",-1,-1)],h=[new r("ene",-1,2),new r("se",-1,3),new r("en",-1,2),new r("heden",2,1),new r("s",-1,3)],k=[new r("end",-1,1),new r("ig",-1,2),new r("ing",-1,1),new r("lijk",-1,3),new r("baar",-1,4),new r("bar",-1,5)],v=[new r("aa",-1,-1),new r("ee",-1,-1),new r("oo",-1,-1),new r("uu",-1,-1)],q=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,128],j=[1,0,0,17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,128],z=[17,67,16,1,0,0,0,0,0,0,0,0,0,0,0,0,128],C=new i;this.setCurrent=function(e){C.setCurrent(e)},this.getCurrent=function(){return C.getCurrent()},this.stem=function(){var r=C.cursor;return e(),C.cursor=r,o(),C.limit_backward=r,C.cursor=C.limit,d(),C.cursor=C.limit_backward,s(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return n.setCurrent(e),n.stem(),n.getCurrent()}):(n.setCurrent(e),n.stem(),n.getCurrent())}}(),e.Pipeline.registerFunction(e.du.stemmer,"stemmer-du"),e.du.stopWordFilter=e.generateStopWordFilter(" aan al alles als altijd andere ben bij daar dan dat de der deze die dit doch doen door dus een eens en er ge geen geweest haar had heb hebben heeft hem het hier hij 
hoe hun iemand iets ik in is ja je kan kon kunnen maar me meer men met mij mijn moet na naar niet niets nog nu of om omdat onder ons ook op over reeds te tegen toch toen tot u uit uw van veel voor want waren was wat werd wezen wie wil worden wordt zal ze zelf zich zij zijn zo zonder zou".split(" ")),e.Pipeline.registerFunction(e.du.stopWordFilter,"stopWordFilter-du")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.es.min.js b/assets/javascripts/lunr/min/lunr.es.min.js deleted file mode 100644 index 2989d34265c9..000000000000 --- a/assets/javascripts/lunr/min/lunr.es.min.js +++ /dev/null @@ -1,18 +0,0 @@ -/*! - * Lunr languages, `Spanish` language - * https://github.com/MihaiValentin/lunr-languages - * - * Copyright 2014, Mihai Valentin - * http://www.mozilla.org/MPL/ - */ -/*! - * based on - * Snowball JavaScript Library v0.3 - * http://code.google.com/p/urim/ - * http://snowball.tartarus.org/ - * - * Copyright 2010, Oleg Mazko - * http://www.mozilla.org/MPL/ - */ - -!function(e,s){"function"==typeof define&&define.amd?define(s):"object"==typeof exports?module.exports=s():s()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.es=function(){this.pipeline.reset(),this.pipeline.add(e.es.trimmer,e.es.stopWordFilter,e.es.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.es.stemmer))},e.es.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.es.trimmer=e.trimmerSupport.generateTrimmer(e.es.wordCharacters),e.Pipeline.registerFunction(e.es.trimmer,"trimmer-es"),e.es.stemmer=function(){var s=e.stemmerSupport.Among,r=e.stemmerSupport.SnowballProgram,n=new function(){function e(){if(A.out_grouping(x,97,252)){for(;!A.in_grouping(x,97,252);){if(A.cursor>=A.limit)return!0;A.cursor++}return!1}return!0}function n(){if(A.in_grouping(x,97,252)){var s=A.cursor;if(e()){if(A.cursor=s,!A.in_grouping(x,97,252))return!0;for(;!A.out_grouping(x,97,252);){if(A.cursor>=A.limit)return!0;A.cursor++}}return!1}return!0}function i(){var s,r=A.cursor;if(n()){if(A.cursor=r,!A.out_grouping(x,97,252))return;if(s=A.cursor,e()){if(A.cursor=s,!A.in_grouping(x,97,252)||A.cursor>=A.limit)return;A.cursor++}}g=A.cursor}function a(){for(;!A.in_grouping(x,97,252);){if(A.cursor>=A.limit)return!1;A.cursor++}for(;!A.out_grouping(x,97,252);){if(A.cursor>=A.limit)return!1;A.cursor++}return!0}function t(){var e=A.cursor;g=A.limit,p=g,v=g,i(),A.cursor=e,a()&&(p=A.cursor,a()&&(v=A.cursor))}function o(){for(var e;;){if(A.bra=A.cursor,e=A.find_among(k,6))switch(A.ket=A.cursor,e){case 1:A.slice_from("a");continue;case 2:A.slice_from("e");continue;case 3:A.slice_from("i");continue;case 4:A.slice_from("o");continue;case 5:A.slice_from("u");continue;case 6:if(A.cursor>=A.limit)break;A.cursor++;continue}break}}function u(){return g<=A.cursor}function w(){return p<=A.cursor}function c(){return v<=A.cursor}function m(){var e;if(A.ket=A.cursor,A.find_among_b(y,13)&&(A.bra=A.cursor,(e=A.find_among_b(q,11))&&u()))switch(e){case 1:A.bra=A.cursor,A.slice_from("iendo");break;case 
2:A.bra=A.cursor,A.slice_from("ando");break;case 3:A.bra=A.cursor,A.slice_from("ar");break;case 4:A.bra=A.cursor,A.slice_from("er");break;case 5:A.bra=A.cursor,A.slice_from("ir");break;case 6:A.slice_del();break;case 7:A.eq_s_b(1,"u")&&A.slice_del()}}function l(e,s){if(!c())return!0;A.slice_del(),A.ket=A.cursor;var r=A.find_among_b(e,s);return r&&(A.bra=A.cursor,1==r&&c()&&A.slice_del()),!1}function d(e){return!c()||(A.slice_del(),A.ket=A.cursor,A.eq_s_b(2,e)&&(A.bra=A.cursor,c()&&A.slice_del()),!1)}function b(){var e;if(A.ket=A.cursor,e=A.find_among_b(S,46)){switch(A.bra=A.cursor,e){case 1:if(!c())return!1;A.slice_del();break;case 2:if(d("ic"))return!1;break;case 3:if(!c())return!1;A.slice_from("log");break;case 4:if(!c())return!1;A.slice_from("u");break;case 5:if(!c())return!1;A.slice_from("ente");break;case 6:if(!w())return!1;A.slice_del(),A.ket=A.cursor,e=A.find_among_b(C,4),e&&(A.bra=A.cursor,c()&&(A.slice_del(),1==e&&(A.ket=A.cursor,A.eq_s_b(2,"at")&&(A.bra=A.cursor,c()&&A.slice_del()))));break;case 7:if(l(P,3))return!1;break;case 8:if(l(F,3))return!1;break;case 9:if(d("at"))return!1}return!0}return!1}function f(){var e,s;if(A.cursor>=g&&(s=A.limit_backward,A.limit_backward=g,A.ket=A.cursor,e=A.find_among_b(W,12),A.limit_backward=s,e)){if(A.bra=A.cursor,1==e){if(!A.eq_s_b(1,"u"))return!1;A.slice_del()}return!0}return!1}function _(){var e,s,r,n;if(A.cursor>=g&&(s=A.limit_backward,A.limit_backward=g,A.ket=A.cursor,e=A.find_among_b(L,96),A.limit_backward=s,e))switch(A.bra=A.cursor,e){case 1:r=A.limit-A.cursor,A.eq_s_b(1,"u")?(n=A.limit-A.cursor,A.eq_s_b(1,"g")?A.cursor=A.limit-n:A.cursor=A.limit-r):A.cursor=A.limit-r,A.bra=A.cursor;case 2:A.slice_del()}}function h(){var e,s;if(A.ket=A.cursor,e=A.find_among_b(z,8))switch(A.bra=A.cursor,e){case 1:u()&&A.slice_del();break;case 2:u()&&(A.slice_del(),A.ket=A.cursor,A.eq_s_b(1,"u")&&(A.bra=A.cursor,s=A.limit-A.cursor,A.eq_s_b(1,"g")&&(A.cursor=A.limit-s,u()&&A.slice_del())))}}var v,p,g,k=[new s("",-1,6),new 
s("á",0,1),new s("é",0,2),new s("í",0,3),new s("ó",0,4),new s("ú",0,5)],y=[new s("la",-1,-1),new s("sela",0,-1),new s("le",-1,-1),new s("me",-1,-1),new s("se",-1,-1),new s("lo",-1,-1),new s("selo",5,-1),new s("las",-1,-1),new s("selas",7,-1),new s("les",-1,-1),new s("los",-1,-1),new s("selos",10,-1),new s("nos",-1,-1)],q=[new s("ando",-1,6),new s("iendo",-1,6),new s("yendo",-1,7),new s("ándo",-1,2),new s("iéndo",-1,1),new s("ar",-1,6),new s("er",-1,6),new s("ir",-1,6),new s("ár",-1,3),new s("ér",-1,4),new s("ír",-1,5)],C=[new s("ic",-1,-1),new s("ad",-1,-1),new s("os",-1,-1),new s("iv",-1,1)],P=[new s("able",-1,1),new s("ible",-1,1),new s("ante",-1,1)],F=[new s("ic",-1,1),new s("abil",-1,1),new s("iv",-1,1)],S=[new s("ica",-1,1),new s("ancia",-1,2),new s("encia",-1,5),new s("adora",-1,2),new s("osa",-1,1),new s("ista",-1,1),new s("iva",-1,9),new s("anza",-1,1),new s("logía",-1,3),new s("idad",-1,8),new s("able",-1,1),new s("ible",-1,1),new s("ante",-1,2),new s("mente",-1,7),new s("amente",13,6),new s("ación",-1,2),new s("ución",-1,4),new s("ico",-1,1),new s("ismo",-1,1),new s("oso",-1,1),new s("amiento",-1,1),new s("imiento",-1,1),new s("ivo",-1,9),new s("ador",-1,2),new s("icas",-1,1),new s("ancias",-1,2),new s("encias",-1,5),new s("adoras",-1,2),new s("osas",-1,1),new s("istas",-1,1),new s("ivas",-1,9),new s("anzas",-1,1),new s("logías",-1,3),new s("idades",-1,8),new s("ables",-1,1),new s("ibles",-1,1),new s("aciones",-1,2),new s("uciones",-1,4),new s("adores",-1,2),new s("antes",-1,2),new s("icos",-1,1),new s("ismos",-1,1),new s("osos",-1,1),new s("amientos",-1,1),new s("imientos",-1,1),new s("ivos",-1,9)],W=[new s("ya",-1,1),new s("ye",-1,1),new s("yan",-1,1),new s("yen",-1,1),new s("yeron",-1,1),new s("yendo",-1,1),new s("yo",-1,1),new s("yas",-1,1),new s("yes",-1,1),new s("yais",-1,1),new s("yamos",-1,1),new s("yó",-1,1)],L=[new s("aba",-1,2),new s("ada",-1,2),new s("ida",-1,2),new s("ara",-1,2),new s("iera",-1,2),new s("ía",-1,2),new s("aría",5,2),new 
s("ería",5,2),new s("iría",5,2),new s("ad",-1,2),new s("ed",-1,2),new s("id",-1,2),new s("ase",-1,2),new s("iese",-1,2),new s("aste",-1,2),new s("iste",-1,2),new s("an",-1,2),new s("aban",16,2),new s("aran",16,2),new s("ieran",16,2),new s("ían",16,2),new s("arían",20,2),new s("erían",20,2),new s("irían",20,2),new s("en",-1,1),new s("asen",24,2),new s("iesen",24,2),new s("aron",-1,2),new s("ieron",-1,2),new s("arán",-1,2),new s("erán",-1,2),new s("irán",-1,2),new s("ado",-1,2),new s("ido",-1,2),new s("ando",-1,2),new s("iendo",-1,2),new s("ar",-1,2),new s("er",-1,2),new s("ir",-1,2),new s("as",-1,2),new s("abas",39,2),new s("adas",39,2),new s("idas",39,2),new s("aras",39,2),new s("ieras",39,2),new s("ías",39,2),new s("arías",45,2),new s("erías",45,2),new s("irías",45,2),new s("es",-1,1),new s("ases",49,2),new s("ieses",49,2),new s("abais",-1,2),new s("arais",-1,2),new s("ierais",-1,2),new s("íais",-1,2),new s("aríais",55,2),new s("eríais",55,2),new s("iríais",55,2),new s("aseis",-1,2),new s("ieseis",-1,2),new s("asteis",-1,2),new s("isteis",-1,2),new s("áis",-1,2),new s("éis",-1,1),new s("aréis",64,2),new s("eréis",64,2),new s("iréis",64,2),new s("ados",-1,2),new s("idos",-1,2),new s("amos",-1,2),new s("ábamos",70,2),new s("áramos",70,2),new s("iéramos",70,2),new s("íamos",70,2),new s("aríamos",74,2),new s("eríamos",74,2),new s("iríamos",74,2),new s("emos",-1,1),new s("aremos",78,2),new s("eremos",78,2),new s("iremos",78,2),new s("ásemos",78,2),new s("iésemos",78,2),new s("imos",-1,2),new s("arás",-1,2),new s("erás",-1,2),new s("irás",-1,2),new s("ís",-1,2),new s("ará",-1,2),new s("erá",-1,2),new s("irá",-1,2),new s("aré",-1,2),new s("eré",-1,2),new s("iré",-1,2),new s("ió",-1,2)],z=[new s("a",-1,1),new s("e",-1,2),new s("o",-1,1),new s("os",-1,1),new s("á",-1,1),new s("é",-1,2),new s("í",-1,1),new s("ó",-1,1)],x=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,1,17,4,10],A=new r;this.setCurrent=function(e){A.setCurrent(e)},this.getCurrent=function(){return 
A.getCurrent()},this.stem=function(){var e=A.cursor;return t(),A.limit_backward=e,A.cursor=A.limit,m(),A.cursor=A.limit,b()||(A.cursor=A.limit,f()||(A.cursor=A.limit,_())),A.cursor=A.limit,h(),A.cursor=A.limit_backward,o(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return n.setCurrent(e),n.stem(),n.getCurrent()}):(n.setCurrent(e),n.stem(),n.getCurrent())}}(),e.Pipeline.registerFunction(e.es.stemmer,"stemmer-es"),e.es.stopWordFilter=e.generateStopWordFilter("a al algo algunas algunos ante antes como con contra cual cuando de del desde donde durante e el ella ellas ellos en entre era erais eran eras eres es esa esas ese eso esos esta estaba estabais estaban estabas estad estada estadas estado estados estamos estando estar estaremos estará estarán estarás estaré estaréis estaría estaríais estaríamos estarían estarías estas este estemos esto estos estoy estuve estuviera estuvierais estuvieran estuvieras estuvieron estuviese estuvieseis estuviesen estuvieses estuvimos estuviste estuvisteis estuviéramos estuviésemos estuvo está estábamos estáis están estás esté estéis estén estés fue fuera fuerais fueran fueras fueron fuese fueseis fuesen fueses fui fuimos fuiste fuisteis fuéramos fuésemos ha habida habidas habido habidos habiendo habremos habrá habrán habrás habré habréis habría habríais habríamos habrían habrías habéis había habíais habíamos habían habías han has hasta hay haya hayamos hayan hayas hayáis he hemos hube hubiera hubierais hubieran hubieras hubieron hubiese hubieseis hubiesen hubieses hubimos hubiste hubisteis hubiéramos hubiésemos hubo la las le les lo los me mi mis mucho muchos muy más mí mía mías mío míos nada ni no nos nosotras nosotros nuestra nuestras nuestro nuestros o os otra otras otro otros para pero poco por porque que quien quienes qué se sea seamos sean seas seremos será serán serás seré seréis sería seríais seríamos serían serías seáis sido siendo sin sobre sois somos son soy su sus suya suyas suyo suyos sí 
también tanto te tendremos tendrá tendrán tendrás tendré tendréis tendría tendríais tendríamos tendrían tendrías tened tenemos tenga tengamos tengan tengas tengo tengáis tenida tenidas tenido tenidos teniendo tenéis tenía teníais teníamos tenían tenías ti tiene tienen tienes todo todos tu tus tuve tuviera tuvierais tuvieran tuvieras tuvieron tuviese tuvieseis tuviesen tuvieses tuvimos tuviste tuvisteis tuviéramos tuviésemos tuvo tuya tuyas tuyo tuyos tú un una uno unos vosotras vosotros vuestra vuestras vuestro vuestros y ya yo él éramos".split(" ")),e.Pipeline.registerFunction(e.es.stopWordFilter,"stopWordFilter-es")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.fi.min.js b/assets/javascripts/lunr/min/lunr.fi.min.js deleted file mode 100644 index 29f5dfcea8f3..000000000000 --- a/assets/javascripts/lunr/min/lunr.fi.min.js +++ /dev/null @@ -1,18 +0,0 @@ -/*! - * Lunr languages, `Finnish` language - * https://github.com/MihaiValentin/lunr-languages - * - * Copyright 2014, Mihai Valentin - * http://www.mozilla.org/MPL/ - */ -/*! - * based on - * Snowball JavaScript Library v0.3 - * http://code.google.com/p/urim/ - * http://snowball.tartarus.org/ - * - * Copyright 2010, Oleg Mazko - * http://www.mozilla.org/MPL/ - */ - -!function(i,e){"function"==typeof define&&define.amd?define(e):"object"==typeof exports?module.exports=e():e()(i.lunr)}(this,function(){return function(i){if(void 0===i)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===i.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");i.fi=function(){this.pipeline.reset(),this.pipeline.add(i.fi.trimmer,i.fi.stopWordFilter,i.fi.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(i.fi.stemmer))},i.fi.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",i.fi.trimmer=i.trimmerSupport.generateTrimmer(i.fi.wordCharacters),i.Pipeline.registerFunction(i.fi.trimmer,"trimmer-fi"),i.fi.stemmer=function(){var e=i.stemmerSupport.Among,r=i.stemmerSupport.SnowballProgram,n=new function(){function i(){f=A.limit,d=f,n()||(f=A.cursor,n()||(d=A.cursor))}function n(){for(var i;;){if(i=A.cursor,A.in_grouping(W,97,246))break;if(A.cursor=i,i>=A.limit)return!0;A.cursor++}for(A.cursor=i;!A.out_grouping(W,97,246);){if(A.cursor>=A.limit)return!0;A.cursor++}return!1}function t(){return d<=A.cursor}function s(){var i,e;if(A.cursor>=f)if(e=A.limit_backward,A.limit_backward=f,A.ket=A.cursor,i=A.find_among_b(h,10)){switch(A.bra=A.cursor,A.limit_backward=e,i){case 1:if(!A.in_grouping_b(x,97,246))return;break;case 2:if(!t())return}A.slice_del()}else A.limit_backward=e}function o(){var i,e,r;if(A.cursor>=f)if(e=A.limit_backward,A.limit_backward=f,A.ket=A.cursor,i=A.find_among_b(v,9))switch(A.bra=A.cursor,A.limit_backward=e,i){case 1:r=A.limit-A.cursor,A.eq_s_b(1,"k")||(A.cursor=A.limit-r,A.slice_del());break;case 2:A.slice_del(),A.ket=A.cursor,A.eq_s_b(3,"kse")&&(A.bra=A.cursor,A.slice_from("ksi"));break;case 3:A.slice_del();break;case 4:A.find_among_b(p,6)&&A.slice_del();break;case 5:A.find_among_b(g,6)&&A.slice_del();break;case 6:A.find_among_b(j,2)&&A.slice_del()}else A.limit_backward=e}function l(){return A.find_among_b(q,7)}function a(){return A.eq_s_b(1,"i")&&A.in_grouping_b(L,97,246)}function u(){var i,e,r;if(A.cursor>=f)if(e=A.limit_backward,A.limit_backward=f,A.ket=A.cursor,i=A.find_among_b(C,30)){switch(A.bra=A.cursor,A.limit_backward=e,i){case 
1:if(!A.eq_s_b(1,"a"))return;break;case 2:case 9:if(!A.eq_s_b(1,"e"))return;break;case 3:if(!A.eq_s_b(1,"i"))return;break;case 4:if(!A.eq_s_b(1,"o"))return;break;case 5:if(!A.eq_s_b(1,"ä"))return;break;case 6:if(!A.eq_s_b(1,"ö"))return;break;case 7:if(r=A.limit-A.cursor,!l()&&(A.cursor=A.limit-r,!A.eq_s_b(2,"ie"))){A.cursor=A.limit-r;break}if(A.cursor=A.limit-r,A.cursor<=A.limit_backward){A.cursor=A.limit-r;break}A.cursor--,A.bra=A.cursor;break;case 8:if(!A.in_grouping_b(W,97,246)||!A.out_grouping_b(W,97,246))return}A.slice_del(),k=!0}else A.limit_backward=e}function c(){var i,e,r;if(A.cursor>=d)if(e=A.limit_backward,A.limit_backward=d,A.ket=A.cursor,i=A.find_among_b(P,14)){if(A.bra=A.cursor,A.limit_backward=e,1==i){if(r=A.limit-A.cursor,A.eq_s_b(2,"po"))return;A.cursor=A.limit-r}A.slice_del()}else A.limit_backward=e}function m(){var i;A.cursor>=f&&(i=A.limit_backward,A.limit_backward=f,A.ket=A.cursor,A.find_among_b(F,2)?(A.bra=A.cursor,A.limit_backward=i,A.slice_del()):A.limit_backward=i)}function w(){var i,e,r,n,t,s;if(A.cursor>=f){if(e=A.limit_backward,A.limit_backward=f,A.ket=A.cursor,A.eq_s_b(1,"t")&&(A.bra=A.cursor,r=A.limit-A.cursor,A.in_grouping_b(W,97,246)&&(A.cursor=A.limit-r,A.slice_del(),A.limit_backward=e,n=A.limit-A.cursor,A.cursor>=d&&(A.cursor=d,t=A.limit_backward,A.limit_backward=A.cursor,A.cursor=A.limit-n,A.ket=A.cursor,i=A.find_among_b(S,2))))){if(A.bra=A.cursor,A.limit_backward=t,1==i){if(s=A.limit-A.cursor,A.eq_s_b(2,"po"))return;A.cursor=A.limit-s}return void A.slice_del()}A.limit_backward=e}}function _(){var 
i,e,r,n;if(A.cursor>=f){for(i=A.limit_backward,A.limit_backward=f,e=A.limit-A.cursor,l()&&(A.cursor=A.limit-e,A.ket=A.cursor,A.cursor>A.limit_backward&&(A.cursor--,A.bra=A.cursor,A.slice_del())),A.cursor=A.limit-e,A.ket=A.cursor,A.in_grouping_b(y,97,228)&&(A.bra=A.cursor,A.out_grouping_b(W,97,246)&&A.slice_del()),A.cursor=A.limit-e,A.ket=A.cursor,A.eq_s_b(1,"j")&&(A.bra=A.cursor,r=A.limit-A.cursor,A.eq_s_b(1,"o")?A.slice_del():(A.cursor=A.limit-r,A.eq_s_b(1,"u")&&A.slice_del())),A.cursor=A.limit-e,A.ket=A.cursor,A.eq_s_b(1,"o")&&(A.bra=A.cursor,A.eq_s_b(1,"j")&&A.slice_del()),A.cursor=A.limit-e,A.limit_backward=i;;){if(n=A.limit-A.cursor,A.out_grouping_b(W,97,246)){A.cursor=A.limit-n;break}if(A.cursor=A.limit-n,A.cursor<=A.limit_backward)return;A.cursor--}A.ket=A.cursor,A.cursor>A.limit_backward&&(A.cursor--,A.bra=A.cursor,b=A.slice_to(),A.eq_v_b(b)&&A.slice_del())}}var k,b,d,f,h=[new e("pa",-1,1),new e("sti",-1,2),new e("kaan",-1,1),new e("han",-1,1),new e("kin",-1,1),new e("hän",-1,1),new e("kään",-1,1),new e("ko",-1,1),new e("pä",-1,1),new e("kö",-1,1)],p=[new e("lla",-1,-1),new e("na",-1,-1),new e("ssa",-1,-1),new e("ta",-1,-1),new e("lta",3,-1),new e("sta",3,-1)],g=[new e("llä",-1,-1),new e("nä",-1,-1),new e("ssä",-1,-1),new e("tä",-1,-1),new e("ltä",3,-1),new e("stä",3,-1)],j=[new e("lle",-1,-1),new e("ine",-1,-1)],v=[new e("nsa",-1,3),new e("mme",-1,3),new e("nne",-1,3),new e("ni",-1,2),new e("si",-1,1),new e("an",-1,4),new e("en",-1,6),new e("än",-1,5),new e("nsä",-1,3)],q=[new e("aa",-1,-1),new e("ee",-1,-1),new e("ii",-1,-1),new e("oo",-1,-1),new e("uu",-1,-1),new e("ää",-1,-1),new e("öö",-1,-1)],C=[new e("a",-1,8),new e("lla",0,-1),new e("na",0,-1),new e("ssa",0,-1),new e("ta",0,-1),new e("lta",4,-1),new e("sta",4,-1),new e("tta",4,9),new e("lle",-1,-1),new e("ine",-1,-1),new e("ksi",-1,-1),new e("n",-1,7),new e("han",11,1),new e("den",11,-1,a),new e("seen",11,-1,l),new e("hen",11,2),new e("tten",11,-1,a),new e("hin",11,3),new e("siin",11,-1,a),new 
e("hon",11,4),new e("hän",11,5),new e("hön",11,6),new e("ä",-1,8),new e("llä",22,-1),new e("nä",22,-1),new e("ssä",22,-1),new e("tä",22,-1),new e("ltä",26,-1),new e("stä",26,-1),new e("ttä",26,9)],P=[new e("eja",-1,-1),new e("mma",-1,1),new e("imma",1,-1),new e("mpa",-1,1),new e("impa",3,-1),new e("mmi",-1,1),new e("immi",5,-1),new e("mpi",-1,1),new e("impi",7,-1),new e("ejä",-1,-1),new e("mmä",-1,1),new e("immä",10,-1),new e("mpä",-1,1),new e("impä",12,-1)],F=[new e("i",-1,-1),new e("j",-1,-1)],S=[new e("mma",-1,1),new e("imma",0,-1)],y=[17,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8],W=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,8,0,32],L=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,8,0,32],x=[17,97,24,1,0,0,0,0,0,0,0,0,0,0,0,0,8,0,32],A=new r;this.setCurrent=function(i){A.setCurrent(i)},this.getCurrent=function(){return A.getCurrent()},this.stem=function(){var e=A.cursor;return i(),k=!1,A.limit_backward=e,A.cursor=A.limit,s(),A.cursor=A.limit,o(),A.cursor=A.limit,u(),A.cursor=A.limit,c(),A.cursor=A.limit,k?(m(),A.cursor=A.limit):(A.cursor=A.limit,w(),A.cursor=A.limit),_(),!0}};return function(i){return"function"==typeof i.update?i.update(function(i){return n.setCurrent(i),n.stem(),n.getCurrent()}):(n.setCurrent(i),n.stem(),n.getCurrent())}}(),i.Pipeline.registerFunction(i.fi.stemmer,"stemmer-fi"),i.fi.stopWordFilter=i.generateStopWordFilter("ei eivät emme en et ette että he heidän heidät heihin heille heillä heiltä heissä heistä heitä hän häneen hänelle hänellä häneltä hänen hänessä hänestä hänet häntä itse ja johon joiden joihin joiksi joilla joille joilta joina joissa joista joita joka joksi jolla jolle jolta jona jonka jos jossa josta jota jotka kanssa keiden keihin keiksi keille keillä keiltä keinä keissä keistä keitä keneen keneksi kenelle kenellä keneltä kenen kenenä kenessä kenestä kenet ketkä ketkä ketä koska kuin kuka kun me meidän meidät meihin meille meillä meiltä meissä meistä meitä mihin miksi mikä mille millä miltä minkä minkä minua minulla minulle minulta minun minussa 
minusta minut minuun minä minä missä mistä mitkä mitä mukaan mutta ne niiden niihin niiksi niille niillä niiltä niin niin niinä niissä niistä niitä noiden noihin noiksi noilla noille noilta noin noina noissa noista noita nuo nyt näiden näihin näiksi näille näillä näiltä näinä näissä näistä näitä nämä ole olemme olen olet olette oli olimme olin olisi olisimme olisin olisit olisitte olisivat olit olitte olivat olla olleet ollut on ovat poikki se sekä sen siihen siinä siitä siksi sille sillä sillä siltä sinua sinulla sinulle sinulta sinun sinussa sinusta sinut sinuun sinä sinä sitä tai te teidän teidät teihin teille teillä teiltä teissä teistä teitä tuo tuohon tuoksi tuolla tuolle tuolta tuon tuona tuossa tuosta tuota tähän täksi tälle tällä tältä tämä tämän tänä tässä tästä tätä vaan vai vaikka yli".split(" ")),i.Pipeline.registerFunction(i.fi.stopWordFilter,"stopWordFilter-fi")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.fr.min.js b/assets/javascripts/lunr/min/lunr.fr.min.js deleted file mode 100644 index 68cd0094ae03..000000000000 --- a/assets/javascripts/lunr/min/lunr.fr.min.js +++ /dev/null @@ -1,18 +0,0 @@ -/*! - * Lunr languages, `French` language - * https://github.com/MihaiValentin/lunr-languages - * - * Copyright 2014, Mihai Valentin - * http://www.mozilla.org/MPL/ - */ -/*! - * based on - * Snowball JavaScript Library v0.3 - * http://code.google.com/p/urim/ - * http://snowball.tartarus.org/ - * - * Copyright 2010, Oleg Mazko - * http://www.mozilla.org/MPL/ - */ - -!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.fr=function(){this.pipeline.reset(),this.pipeline.add(e.fr.trimmer,e.fr.stopWordFilter,e.fr.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.fr.stemmer))},e.fr.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.fr.trimmer=e.trimmerSupport.generateTrimmer(e.fr.wordCharacters),e.Pipeline.registerFunction(e.fr.trimmer,"trimmer-fr"),e.fr.stemmer=function(){var r=e.stemmerSupport.Among,s=e.stemmerSupport.SnowballProgram,i=new function(){function e(e,r,s){return!(!W.eq_s(1,e)||(W.ket=W.cursor,!W.in_grouping(F,97,251)))&&(W.slice_from(r),W.cursor=s,!0)}function i(e,r,s){return!!W.eq_s(1,e)&&(W.ket=W.cursor,W.slice_from(r),W.cursor=s,!0)}function n(){for(var r,s;;){if(r=W.cursor,W.in_grouping(F,97,251)){if(W.bra=W.cursor,s=W.cursor,e("u","U",r))continue;if(W.cursor=s,e("i","I",r))continue;if(W.cursor=s,i("y","Y",r))continue}if(W.cursor=r,W.bra=r,!e("y","Y",r)){if(W.cursor=r,W.eq_s(1,"q")&&(W.bra=W.cursor,i("u","U",r)))continue;if(W.cursor=r,r>=W.limit)return;W.cursor++}}}function t(){for(;!W.in_grouping(F,97,251);){if(W.cursor>=W.limit)return!0;W.cursor++}for(;!W.out_grouping(F,97,251);){if(W.cursor>=W.limit)return!0;W.cursor++}return!1}function u(){var e=W.cursor;if(q=W.limit,g=q,p=q,W.in_grouping(F,97,251)&&W.in_grouping(F,97,251)&&W.cursor=W.limit){W.cursor=q;break}W.cursor++}while(!W.in_grouping(F,97,251))}q=W.cursor,W.cursor=e,t()||(g=W.cursor,t()||(p=W.cursor))}function o(){for(var e,r;;){if(r=W.cursor,W.bra=r,!(e=W.find_among(h,4)))break;switch(W.ket=W.cursor,e){case 1:W.slice_from("i");break;case 2:W.slice_from("u");break;case 3:W.slice_from("y");break;case 4:if(W.cursor>=W.limit)return;W.cursor++}}}function c(){return q<=W.cursor}function a(){return g<=W.cursor}function l(){return p<=W.cursor}function w(){var e,r;if(W.ket=W.cursor,e=W.find_among_b(C,43)){switch(W.bra=W.cursor,e){case 
1:if(!l())return!1;W.slice_del();break;case 2:if(!l())return!1;W.slice_del(),W.ket=W.cursor,W.eq_s_b(2,"ic")&&(W.bra=W.cursor,l()?W.slice_del():W.slice_from("iqU"));break;case 3:if(!l())return!1;W.slice_from("log");break;case 4:if(!l())return!1;W.slice_from("u");break;case 5:if(!l())return!1;W.slice_from("ent");break;case 6:if(!c())return!1;if(W.slice_del(),W.ket=W.cursor,e=W.find_among_b(z,6))switch(W.bra=W.cursor,e){case 1:l()&&(W.slice_del(),W.ket=W.cursor,W.eq_s_b(2,"at")&&(W.bra=W.cursor,l()&&W.slice_del()));break;case 2:l()?W.slice_del():a()&&W.slice_from("eux");break;case 3:l()&&W.slice_del();break;case 4:c()&&W.slice_from("i")}break;case 7:if(!l())return!1;if(W.slice_del(),W.ket=W.cursor,e=W.find_among_b(y,3))switch(W.bra=W.cursor,e){case 1:l()?W.slice_del():W.slice_from("abl");break;case 2:l()?W.slice_del():W.slice_from("iqU");break;case 3:l()&&W.slice_del()}break;case 8:if(!l())return!1;if(W.slice_del(),W.ket=W.cursor,W.eq_s_b(2,"at")&&(W.bra=W.cursor,l()&&(W.slice_del(),W.ket=W.cursor,W.eq_s_b(2,"ic")))){W.bra=W.cursor,l()?W.slice_del():W.slice_from("iqU");break}break;case 9:W.slice_from("eau");break;case 10:if(!a())return!1;W.slice_from("al");break;case 11:if(l())W.slice_del();else{if(!a())return!1;W.slice_from("eux")}break;case 12:if(!a()||!W.out_grouping_b(F,97,251))return!1;W.slice_del();break;case 13:return c()&&W.slice_from("ant"),!1;case 14:return c()&&W.slice_from("ent"),!1;case 15:return r=W.limit-W.cursor,W.in_grouping_b(F,97,251)&&c()&&(W.cursor=W.limit-r,W.slice_del()),!1}return!0}return!1}function f(){var e,r;if(W.cursor=q){if(s=W.limit_backward,W.limit_backward=q,W.ket=W.cursor,e=W.find_among_b(P,7))switch(W.bra=W.cursor,e){case 1:if(l()){if(i=W.limit-W.cursor,!W.eq_s_b(1,"s")&&(W.cursor=W.limit-i,!W.eq_s_b(1,"t")))break;W.slice_del()}break;case 2:W.slice_from("i");break;case 3:W.slice_del();break;case 4:W.eq_s_b(2,"gu")&&W.slice_del()}W.limit_backward=s}}function b(){var 
e=W.limit-W.cursor;W.find_among_b(U,5)&&(W.cursor=W.limit-e,W.ket=W.cursor,W.cursor>W.limit_backward&&(W.cursor--,W.bra=W.cursor,W.slice_del()))}function d(){for(var e,r=1;W.out_grouping_b(F,97,251);)r--;if(r<=0){if(W.ket=W.cursor,e=W.limit-W.cursor,!W.eq_s_b(1,"é")&&(W.cursor=W.limit-e,!W.eq_s_b(1,"è")))return;W.bra=W.cursor,W.slice_from("e")}}function k(){if(!w()&&(W.cursor=W.limit,!f()&&(W.cursor=W.limit,!m())))return W.cursor=W.limit,void _();W.cursor=W.limit,W.ket=W.cursor,W.eq_s_b(1,"Y")?(W.bra=W.cursor,W.slice_from("i")):(W.cursor=W.limit,W.eq_s_b(1,"ç")&&(W.bra=W.cursor,W.slice_from("c")))}var p,g,q,v=[new r("col",-1,-1),new r("par",-1,-1),new r("tap",-1,-1)],h=[new r("",-1,4),new r("I",0,1),new r("U",0,2),new r("Y",0,3)],z=[new r("iqU",-1,3),new r("abl",-1,3),new r("Ièr",-1,4),new r("ièr",-1,4),new r("eus",-1,2),new r("iv",-1,1)],y=[new r("ic",-1,2),new r("abil",-1,1),new r("iv",-1,3)],C=[new r("iqUe",-1,1),new r("atrice",-1,2),new r("ance",-1,1),new r("ence",-1,5),new r("logie",-1,3),new r("able",-1,1),new r("isme",-1,1),new r("euse",-1,11),new r("iste",-1,1),new r("ive",-1,8),new r("if",-1,8),new r("usion",-1,4),new r("ation",-1,2),new r("ution",-1,4),new r("ateur",-1,2),new r("iqUes",-1,1),new r("atrices",-1,2),new r("ances",-1,1),new r("ences",-1,5),new r("logies",-1,3),new r("ables",-1,1),new r("ismes",-1,1),new r("euses",-1,11),new r("istes",-1,1),new r("ives",-1,8),new r("ifs",-1,8),new r("usions",-1,4),new r("ations",-1,2),new r("utions",-1,4),new r("ateurs",-1,2),new r("ments",-1,15),new r("ements",30,6),new r("issements",31,12),new r("ités",-1,7),new r("ment",-1,15),new r("ement",34,6),new r("issement",35,12),new r("amment",34,13),new r("emment",34,14),new r("aux",-1,10),new r("eaux",39,9),new r("eux",-1,1),new r("ité",-1,7)],x=[new r("ira",-1,1),new r("ie",-1,1),new r("isse",-1,1),new r("issante",-1,1),new r("i",-1,1),new r("irai",4,1),new r("ir",-1,1),new r("iras",-1,1),new r("ies",-1,1),new r("îmes",-1,1),new r("isses",-1,1),new 
r("issantes",-1,1),new r("îtes",-1,1),new r("is",-1,1),new r("irais",13,1),new r("issais",13,1),new r("irions",-1,1),new r("issions",-1,1),new r("irons",-1,1),new r("issons",-1,1),new r("issants",-1,1),new r("it",-1,1),new r("irait",21,1),new r("issait",21,1),new r("issant",-1,1),new r("iraIent",-1,1),new r("issaIent",-1,1),new r("irent",-1,1),new r("issent",-1,1),new r("iront",-1,1),new r("ît",-1,1),new r("iriez",-1,1),new r("issiez",-1,1),new r("irez",-1,1),new r("issez",-1,1)],I=[new r("a",-1,3),new r("era",0,2),new r("asse",-1,3),new r("ante",-1,3),new r("ée",-1,2),new r("ai",-1,3),new r("erai",5,2),new r("er",-1,2),new r("as",-1,3),new r("eras",8,2),new r("âmes",-1,3),new r("asses",-1,3),new r("antes",-1,3),new r("âtes",-1,3),new r("ées",-1,2),new r("ais",-1,3),new r("erais",15,2),new r("ions",-1,1),new r("erions",17,2),new r("assions",17,3),new r("erons",-1,2),new r("ants",-1,3),new r("és",-1,2),new r("ait",-1,3),new r("erait",23,2),new r("ant",-1,3),new r("aIent",-1,3),new r("eraIent",26,2),new r("èrent",-1,2),new r("assent",-1,3),new r("eront",-1,2),new r("ât",-1,3),new r("ez",-1,2),new r("iez",32,2),new r("eriez",33,2),new r("assiez",33,3),new r("erez",32,2),new r("é",-1,2)],P=[new r("e",-1,3),new r("Ière",0,2),new r("ière",0,2),new r("ion",-1,1),new r("Ier",-1,2),new r("ier",-1,2),new r("ë",-1,4)],U=[new r("ell",-1,-1),new r("eill",-1,-1),new r("enn",-1,-1),new r("onn",-1,-1),new r("ett",-1,-1)],F=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,128,130,103,8,5],S=[1,65,20,0,0,0,0,0,0,0,0,0,0,0,0,0,128],W=new s;this.setCurrent=function(e){W.setCurrent(e)},this.getCurrent=function(){return W.getCurrent()},this.stem=function(){var e=W.cursor;return n(),W.cursor=e,u(),W.limit_backward=e,W.cursor=W.limit,k(),W.cursor=W.limit,b(),W.cursor=W.limit,d(),W.cursor=W.limit_backward,o(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return 
i.setCurrent(e),i.stem(),i.getCurrent()}):(i.setCurrent(e),i.stem(),i.getCurrent())}}(),e.Pipeline.registerFunction(e.fr.stemmer,"stemmer-fr"),e.fr.stopWordFilter=e.generateStopWordFilter("ai aie aient aies ait as au aura aurai auraient aurais aurait auras aurez auriez aurions aurons auront aux avaient avais avait avec avez aviez avions avons ayant ayez ayons c ce ceci celà ces cet cette d dans de des du elle en es est et eu eue eues eurent eus eusse eussent eusses eussiez eussions eut eux eûmes eût eûtes furent fus fusse fussent fusses fussiez fussions fut fûmes fût fûtes ici il ils j je l la le les leur leurs lui m ma mais me mes moi mon même n ne nos notre nous on ont ou par pas pour qu que quel quelle quelles quels qui s sa sans se sera serai seraient serais serait seras serez seriez serions serons seront ses soi soient sois soit sommes son sont soyez soyons suis sur t ta te tes toi ton tu un une vos votre vous y à étaient étais était étant étiez étions été étée étées étés êtes".split(" ")),e.Pipeline.registerFunction(e.fr.stopWordFilter,"stopWordFilter-fr")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.hi.min.js b/assets/javascripts/lunr/min/lunr.hi.min.js deleted file mode 100644 index 7dbc41402cf3..000000000000 --- a/assets/javascripts/lunr/min/lunr.hi.min.js +++ /dev/null @@ -1 +0,0 @@ -!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.hi=function(){this.pipeline.reset(),this.pipeline.add(e.hi.trimmer,e.hi.stopWordFilter,e.hi.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.hi.stemmer))},e.hi.wordCharacters="ऀ-ःऄ-एऐ-टठ-यर-िी-ॏॐ-य़ॠ-९॰-ॿa-zA-Za-zA-Z0-90-9",e.hi.trimmer=e.trimmerSupport.generateTrimmer(e.hi.wordCharacters),e.Pipeline.registerFunction(e.hi.trimmer,"trimmer-hi"),e.hi.stopWordFilter=e.generateStopWordFilter("अत अपना अपनी अपने अभी अंदर आदि आप इत्यादि इन इनका इन्हीं इन्हें इन्हों इस इसका इसकी इसके इसमें इसी इसे उन उनका उनकी उनके उनको उन्हीं उन्हें उन्हों उस उसके उसी उसे एक एवं एस ऐसे और कई कर करता करते करना करने करें कहते कहा का काफ़ी कि कितना किन्हें किन्हों किया किर किस किसी किसे की कुछ कुल के को कोई कौन कौनसा गया घर जब जहाँ जा जितना जिन जिन्हें जिन्हों जिस जिसे जीधर जैसा जैसे जो तक तब तरह तिन तिन्हें तिन्हों तिस तिसे तो था थी थे दबारा दिया दुसरा दूसरे दो द्वारा न नके नहीं ना निहायत नीचे ने पर पहले पूरा पे फिर बनी बही बहुत बाद बाला बिलकुल भी भीतर मगर मानो मे में यदि यह यहाँ यही या यिह ये रखें रहा रहे ऱ्वासा लिए लिये लेकिन व वग़ैरह वर्ग वह वहाँ वहीं वाले वुह वे वो सकता सकते सबसे सभी साथ साबुत साभ सारा से सो संग ही हुआ हुई हुए है हैं हो होता होती होते होना होने".split(" ")),e.hi.stemmer=function(){return function(e){return"function"==typeof e.update?e.update(function(e){return e}):e}}();var r=e.wordcut;r.init(),e.hi.tokenizer=function(i){if(!arguments.length||null==i||void 0==i)return[];if(Array.isArray(i))return i.map(function(r){return isLunr2?new e.Token(r.toLowerCase()):r.toLowerCase()});var t=i.toString().toLowerCase().replace(/^\s+/,"");return r.cut(t).split("|")},e.Pipeline.registerFunction(e.hi.stemmer,"stemmer-hi"),e.Pipeline.registerFunction(e.hi.stopWordFilter,"stopWordFilter-hi")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.hu.min.js b/assets/javascripts/lunr/min/lunr.hu.min.js deleted file mode 100644 index ed9d909f7344..000000000000 --- 
a/assets/javascripts/lunr/min/lunr.hu.min.js +++ /dev/null @@ -1,18 +0,0 @@ -/*! - * Lunr languages, `Hungarian` language - * https://github.com/MihaiValentin/lunr-languages - * - * Copyright 2014, Mihai Valentin - * http://www.mozilla.org/MPL/ - */ -/*! - * based on - * Snowball JavaScript Library v0.3 - * http://code.google.com/p/urim/ - * http://snowball.tartarus.org/ - * - * Copyright 2010, Oleg Mazko - * http://www.mozilla.org/MPL/ - */ - -!function(e,n){"function"==typeof define&&define.amd?define(n):"object"==typeof exports?module.exports=n():n()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.hu=function(){this.pipeline.reset(),this.pipeline.add(e.hu.trimmer,e.hu.stopWordFilter,e.hu.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.hu.stemmer))},e.hu.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.hu.trimmer=e.trimmerSupport.generateTrimmer(e.hu.wordCharacters),e.Pipeline.registerFunction(e.hu.trimmer,"trimmer-hu"),e.hu.stemmer=function(){var n=e.stemmerSupport.Among,r=e.stemmerSupport.SnowballProgram,i=new function(){function e(){var e,n=L.cursor;if(d=L.limit,L.in_grouping(W,97,252))for(;;){if(e=L.cursor,L.out_grouping(W,97,252))return L.cursor=e,L.find_among(g,8)||(L.cursor=e,e=L.limit)return void(d=e);L.cursor++}if(L.cursor=n,L.out_grouping(W,97,252)){for(;!L.in_grouping(W,97,252);){if(L.cursor>=L.limit)return;L.cursor++}d=L.cursor}}function i(){return d<=L.cursor}function a(){var e;if(L.ket=L.cursor,(e=L.find_among_b(h,2))&&(L.bra=L.cursor,i()))switch(e){case 1:L.slice_from("a");break;case 2:L.slice_from("e")}}function t(){var e=L.limit-L.cursor;return!!L.find_among_b(p,23)&&(L.cursor=L.limit-e,!0)}function 
s(){if(L.cursor>L.limit_backward){L.cursor--,L.ket=L.cursor;var e=L.cursor-1;L.limit_backward<=e&&e<=L.limit&&(L.cursor=e,L.bra=e,L.slice_del())}}function c(){var e;if(L.ket=L.cursor,(e=L.find_among_b(_,2))&&(L.bra=L.cursor,i())){if((1==e||2==e)&&!t())return;L.slice_del(),s()}}function o(){L.ket=L.cursor,L.find_among_b(v,44)&&(L.bra=L.cursor,i()&&(L.slice_del(),a()))}function w(){var e;if(L.ket=L.cursor,(e=L.find_among_b(z,3))&&(L.bra=L.cursor,i()))switch(e){case 1:L.slice_from("e");break;case 2:case 3:L.slice_from("a")}}function l(){var e;if(L.ket=L.cursor,(e=L.find_among_b(y,6))&&(L.bra=L.cursor,i()))switch(e){case 1:case 2:L.slice_del();break;case 3:L.slice_from("a");break;case 4:L.slice_from("e")}}function u(){var e;if(L.ket=L.cursor,(e=L.find_among_b(j,2))&&(L.bra=L.cursor,i())){if((1==e||2==e)&&!t())return;L.slice_del(),s()}}function m(){var e;if(L.ket=L.cursor,(e=L.find_among_b(C,7))&&(L.bra=L.cursor,i()))switch(e){case 1:L.slice_from("a");break;case 2:L.slice_from("e");break;case 3:case 4:case 5:case 6:case 7:L.slice_del()}}function k(){var e;if(L.ket=L.cursor,(e=L.find_among_b(P,12))&&(L.bra=L.cursor,i()))switch(e){case 1:case 4:case 7:case 9:L.slice_del();break;case 2:case 5:case 8:L.slice_from("e");break;case 3:case 6:L.slice_from("a")}}function f(){var e;if(L.ket=L.cursor,(e=L.find_among_b(F,31))&&(L.bra=L.cursor,i()))switch(e){case 1:case 4:case 7:case 8:case 9:case 12:case 13:case 16:case 17:case 18:L.slice_del();break;case 2:case 5:case 10:case 14:case 19:L.slice_from("a");break;case 3:case 6:case 11:case 15:case 20:L.slice_from("e")}}function b(){var e;if(L.ket=L.cursor,(e=L.find_among_b(S,42))&&(L.bra=L.cursor,i()))switch(e){case 1:case 4:case 5:case 6:case 9:case 10:case 11:case 14:case 15:case 16:case 17:case 20:case 21:case 24:case 25:case 26:case 29:L.slice_del();break;case 2:case 7:case 12:case 18:case 22:case 27:L.slice_from("a");break;case 3:case 8:case 13:case 19:case 23:case 28:L.slice_from("e")}}var d,g=[new n("cs",-1,-1),new 
n("dzs",-1,-1),new n("gy",-1,-1),new n("ly",-1,-1),new n("ny",-1,-1),new n("sz",-1,-1),new n("ty",-1,-1),new n("zs",-1,-1)],h=[new n("á",-1,1),new n("é",-1,2)],p=[new n("bb",-1,-1),new n("cc",-1,-1),new n("dd",-1,-1),new n("ff",-1,-1),new n("gg",-1,-1),new n("jj",-1,-1),new n("kk",-1,-1),new n("ll",-1,-1),new n("mm",-1,-1),new n("nn",-1,-1),new n("pp",-1,-1),new n("rr",-1,-1),new n("ccs",-1,-1),new n("ss",-1,-1),new n("zzs",-1,-1),new n("tt",-1,-1),new n("vv",-1,-1),new n("ggy",-1,-1),new n("lly",-1,-1),new n("nny",-1,-1),new n("tty",-1,-1),new n("ssz",-1,-1),new n("zz",-1,-1)],_=[new n("al",-1,1),new n("el",-1,2)],v=[new n("ba",-1,-1),new n("ra",-1,-1),new n("be",-1,-1),new n("re",-1,-1),new n("ig",-1,-1),new n("nak",-1,-1),new n("nek",-1,-1),new n("val",-1,-1),new n("vel",-1,-1),new n("ul",-1,-1),new n("nál",-1,-1),new n("nél",-1,-1),new n("ból",-1,-1),new n("ról",-1,-1),new n("tól",-1,-1),new n("bõl",-1,-1),new n("rõl",-1,-1),new n("tõl",-1,-1),new n("ül",-1,-1),new n("n",-1,-1),new n("an",19,-1),new n("ban",20,-1),new n("en",19,-1),new n("ben",22,-1),new n("képpen",22,-1),new n("on",19,-1),new n("ön",19,-1),new n("képp",-1,-1),new n("kor",-1,-1),new n("t",-1,-1),new n("at",29,-1),new n("et",29,-1),new n("ként",29,-1),new n("anként",32,-1),new n("enként",32,-1),new n("onként",32,-1),new n("ot",29,-1),new n("ért",29,-1),new n("öt",29,-1),new n("hez",-1,-1),new n("hoz",-1,-1),new n("höz",-1,-1),new n("vá",-1,-1),new n("vé",-1,-1)],z=[new n("án",-1,2),new n("én",-1,1),new n("ánként",-1,3)],y=[new n("stul",-1,2),new n("astul",0,1),new n("ástul",0,3),new n("stül",-1,2),new n("estül",3,1),new n("éstül",3,4)],j=[new n("á",-1,1),new n("é",-1,2)],C=[new n("k",-1,7),new n("ak",0,4),new n("ek",0,6),new n("ok",0,5),new n("ák",0,1),new n("ék",0,2),new n("ök",0,3)],P=[new n("éi",-1,7),new n("áéi",0,6),new n("ééi",0,5),new n("é",-1,9),new n("ké",3,4),new n("aké",4,1),new n("eké",4,1),new n("oké",4,1),new n("áké",4,3),new n("éké",4,2),new n("öké",4,1),new n("éé",3,8)],F=[new 
n("a",-1,18),new n("ja",0,17),new n("d",-1,16),new n("ad",2,13),new n("ed",2,13),new n("od",2,13),new n("ád",2,14),new n("éd",2,15),new n("öd",2,13),new n("e",-1,18),new n("je",9,17),new n("nk",-1,4),new n("unk",11,1),new n("ánk",11,2),new n("énk",11,3),new n("ünk",11,1),new n("uk",-1,8),new n("juk",16,7),new n("ájuk",17,5),new n("ük",-1,8),new n("jük",19,7),new n("éjük",20,6),new n("m",-1,12),new n("am",22,9),new n("em",22,9),new n("om",22,9),new n("ám",22,10),new n("ém",22,11),new n("o",-1,18),new n("á",-1,19),new n("é",-1,20)],S=[new n("id",-1,10),new n("aid",0,9),new n("jaid",1,6),new n("eid",0,9),new n("jeid",3,6),new n("áid",0,7),new n("éid",0,8),new n("i",-1,15),new n("ai",7,14),new n("jai",8,11),new n("ei",7,14),new n("jei",10,11),new n("ái",7,12),new n("éi",7,13),new n("itek",-1,24),new n("eitek",14,21),new n("jeitek",15,20),new n("éitek",14,23),new n("ik",-1,29),new n("aik",18,26),new n("jaik",19,25),new n("eik",18,26),new n("jeik",21,25),new n("áik",18,27),new n("éik",18,28),new n("ink",-1,20),new n("aink",25,17),new n("jaink",26,16),new n("eink",25,17),new n("jeink",28,16),new n("áink",25,18),new n("éink",25,19),new n("aitok",-1,21),new n("jaitok",32,20),new n("áitok",-1,22),new n("im",-1,5),new n("aim",35,4),new n("jaim",36,1),new n("eim",35,4),new n("jeim",38,1),new n("áim",35,2),new n("éim",35,3)],W=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,1,17,52,14],L=new r;this.setCurrent=function(e){L.setCurrent(e)},this.getCurrent=function(){return L.getCurrent()},this.stem=function(){var n=L.cursor;return e(),L.limit_backward=n,L.cursor=L.limit,c(),L.cursor=L.limit,o(),L.cursor=L.limit,w(),L.cursor=L.limit,l(),L.cursor=L.limit,u(),L.cursor=L.limit,k(),L.cursor=L.limit,f(),L.cursor=L.limit,b(),L.cursor=L.limit,m(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return 
i.setCurrent(e),i.stem(),i.getCurrent()}):(i.setCurrent(e),i.stem(),i.getCurrent())}}(),e.Pipeline.registerFunction(e.hu.stemmer,"stemmer-hu"),e.hu.stopWordFilter=e.generateStopWordFilter("a abban ahhoz ahogy ahol aki akik akkor alatt amely amelyek amelyekben amelyeket amelyet amelynek ami amikor amit amolyan amíg annak arra arról az azok azon azonban azt aztán azután azzal azért be belül benne bár cikk cikkek cikkeket csak de e ebben eddig egy egyes egyetlen egyik egyre egyéb egész ehhez ekkor el ellen elsõ elég elõ elõször elõtt emilyen ennek erre ez ezek ezen ezt ezzel ezért fel felé hanem hiszen hogy hogyan igen ill ill. illetve ilyen ilyenkor ismét ison itt jobban jó jól kell kellett keressünk keresztül ki kívül között közül legalább legyen lehet lehetett lenne lenni lesz lett maga magát majd majd meg mellett mely melyek mert mi mikor milyen minden mindenki mindent mindig mint mintha mit mivel miért most már más másik még míg nagy nagyobb nagyon ne nekem neki nem nincs néha néhány nélkül olyan ott pedig persze rá s saját sem semmi sok sokat sokkal szemben szerint szinte számára talán tehát teljes tovább továbbá több ugyanis utolsó után utána vagy vagyis vagyok valaki valami valamint való van vannak vele vissza viszont volna volt voltak voltam voltunk által általában át én éppen és így õ õk õket össze úgy új újabb újra".split(" ")),e.Pipeline.registerFunction(e.hu.stopWordFilter,"stopWordFilter-hu")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.it.min.js b/assets/javascripts/lunr/min/lunr.it.min.js deleted file mode 100644 index 344b6a3c0cf8..000000000000 --- a/assets/javascripts/lunr/min/lunr.it.min.js +++ /dev/null @@ -1,18 +0,0 @@ -/*! - * Lunr languages, `Italian` language - * https://github.com/MihaiValentin/lunr-languages - * - * Copyright 2014, Mihai Valentin - * http://www.mozilla.org/MPL/ - */ -/*! 
- * based on - * Snowball JavaScript Library v0.3 - * http://code.google.com/p/urim/ - * http://snowball.tartarus.org/ - * - * Copyright 2010, Oleg Mazko - * http://www.mozilla.org/MPL/ - */ - -!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.it=function(){this.pipeline.reset(),this.pipeline.add(e.it.trimmer,e.it.stopWordFilter,e.it.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.it.stemmer))},e.it.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.it.trimmer=e.trimmerSupport.generateTrimmer(e.it.wordCharacters),e.Pipeline.registerFunction(e.it.trimmer,"trimmer-it"),e.it.stemmer=function(){var r=e.stemmerSupport.Among,n=e.stemmerSupport.SnowballProgram,i=new function(){function e(e,r,n){return!(!x.eq_s(1,e)||(x.ket=x.cursor,!x.in_grouping(L,97,249)))&&(x.slice_from(r),x.cursor=n,!0)}function i(){for(var r,n,i,o,t=x.cursor;;){if(x.bra=x.cursor,r=x.find_among(h,7))switch(x.ket=x.cursor,r){case 1:x.slice_from("à");continue;case 2:x.slice_from("è");continue;case 3:x.slice_from("ì");continue;case 4:x.slice_from("ò");continue;case 5:x.slice_from("ù");continue;case 6:x.slice_from("qU");continue;case 7:if(x.cursor>=x.limit)break;x.cursor++;continue}break}for(x.cursor=t;;)for(n=x.cursor;;){if(i=x.cursor,x.in_grouping(L,97,249)){if(x.bra=x.cursor,o=x.cursor,e("u","U",i))break;if(x.cursor=o,e("i","I",i))break}if(x.cursor=i,x.cursor>=x.limit)return void(x.cursor=n);x.cursor++}}function o(e){if(x.cursor=e,!x.in_grouping(L,97,249))return!1;for(;!x.out_grouping(L,97,249);){if(x.cursor>=x.limit)return!1;x.cursor++}return!0}function 
t(){if(x.in_grouping(L,97,249)){var e=x.cursor;if(x.out_grouping(L,97,249)){for(;!x.in_grouping(L,97,249);){if(x.cursor>=x.limit)return o(e);x.cursor++}return!0}return o(e)}return!1}function s(){var e,r=x.cursor;if(!t()){if(x.cursor=r,!x.out_grouping(L,97,249))return;if(e=x.cursor,x.out_grouping(L,97,249)){for(;!x.in_grouping(L,97,249);){if(x.cursor>=x.limit)return x.cursor=e,void(x.in_grouping(L,97,249)&&x.cursor=x.limit)return;x.cursor++}k=x.cursor}function a(){for(;!x.in_grouping(L,97,249);){if(x.cursor>=x.limit)return!1;x.cursor++}for(;!x.out_grouping(L,97,249);){if(x.cursor>=x.limit)return!1;x.cursor++}return!0}function u(){var e=x.cursor;k=x.limit,p=k,g=k,s(),x.cursor=e,a()&&(p=x.cursor,a()&&(g=x.cursor))}function c(){for(var e;;){if(x.bra=x.cursor,!(e=x.find_among(q,3)))break;switch(x.ket=x.cursor,e){case 1:x.slice_from("i");break;case 2:x.slice_from("u");break;case 3:if(x.cursor>=x.limit)return;x.cursor++}}}function w(){return k<=x.cursor}function l(){return p<=x.cursor}function m(){return g<=x.cursor}function f(){var e;if(x.ket=x.cursor,x.find_among_b(C,37)&&(x.bra=x.cursor,(e=x.find_among_b(z,5))&&w()))switch(e){case 1:x.slice_del();break;case 2:x.slice_from("e")}}function v(){var e;if(x.ket=x.cursor,!(e=x.find_among_b(S,51)))return!1;switch(x.bra=x.cursor,e){case 1:if(!m())return!1;x.slice_del();break;case 2:if(!m())return!1;x.slice_del(),x.ket=x.cursor,x.eq_s_b(2,"ic")&&(x.bra=x.cursor,m()&&x.slice_del());break;case 3:if(!m())return!1;x.slice_from("log");break;case 4:if(!m())return!1;x.slice_from("u");break;case 5:if(!m())return!1;x.slice_from("ente");break;case 6:if(!w())return!1;x.slice_del();break;case 7:if(!l())return!1;x.slice_del(),x.ket=x.cursor,e=x.find_among_b(P,4),e&&(x.bra=x.cursor,m()&&(x.slice_del(),1==e&&(x.ket=x.cursor,x.eq_s_b(2,"at")&&(x.bra=x.cursor,m()&&x.slice_del()))));break;case 8:if(!m())return!1;x.slice_del(),x.ket=x.cursor,e=x.find_among_b(F,3),e&&(x.bra=x.cursor,1==e&&m()&&x.slice_del());break;case 
9:if(!m())return!1;x.slice_del(),x.ket=x.cursor,x.eq_s_b(2,"at")&&(x.bra=x.cursor,m()&&(x.slice_del(),x.ket=x.cursor,x.eq_s_b(2,"ic")&&(x.bra=x.cursor,m()&&x.slice_del())))}return!0}function b(){var e,r;x.cursor>=k&&(r=x.limit_backward,x.limit_backward=k,x.ket=x.cursor,e=x.find_among_b(W,87),e&&(x.bra=x.cursor,1==e&&x.slice_del()),x.limit_backward=r)}function d(){var e=x.limit-x.cursor;if(x.ket=x.cursor,x.in_grouping_b(y,97,242)&&(x.bra=x.cursor,w()&&(x.slice_del(),x.ket=x.cursor,x.eq_s_b(1,"i")&&(x.bra=x.cursor,w()))))return void x.slice_del();x.cursor=x.limit-e}function _(){d(),x.ket=x.cursor,x.eq_s_b(1,"h")&&(x.bra=x.cursor,x.in_grouping_b(U,99,103)&&w()&&x.slice_del())}var g,p,k,h=[new r("",-1,7),new r("qu",0,6),new r("á",0,1),new r("é",0,2),new r("í",0,3),new r("ó",0,4),new r("ú",0,5)],q=[new r("",-1,3),new r("I",0,1),new r("U",0,2)],C=[new r("la",-1,-1),new r("cela",0,-1),new r("gliela",0,-1),new r("mela",0,-1),new r("tela",0,-1),new r("vela",0,-1),new r("le",-1,-1),new r("cele",6,-1),new r("gliele",6,-1),new r("mele",6,-1),new r("tele",6,-1),new r("vele",6,-1),new r("ne",-1,-1),new r("cene",12,-1),new r("gliene",12,-1),new r("mene",12,-1),new r("sene",12,-1),new r("tene",12,-1),new r("vene",12,-1),new r("ci",-1,-1),new r("li",-1,-1),new r("celi",20,-1),new r("glieli",20,-1),new r("meli",20,-1),new r("teli",20,-1),new r("veli",20,-1),new r("gli",20,-1),new r("mi",-1,-1),new r("si",-1,-1),new r("ti",-1,-1),new r("vi",-1,-1),new r("lo",-1,-1),new r("celo",31,-1),new r("glielo",31,-1),new r("melo",31,-1),new r("telo",31,-1),new r("velo",31,-1)],z=[new r("ando",-1,1),new r("endo",-1,1),new r("ar",-1,2),new r("er",-1,2),new r("ir",-1,2)],P=[new r("ic",-1,-1),new r("abil",-1,-1),new r("os",-1,-1),new r("iv",-1,1)],F=[new r("ic",-1,1),new r("abil",-1,1),new r("iv",-1,1)],S=[new r("ica",-1,1),new r("logia",-1,3),new r("osa",-1,1),new r("ista",-1,1),new r("iva",-1,9),new r("anza",-1,1),new r("enza",-1,5),new r("ice",-1,1),new r("atrice",7,1),new r("iche",-1,1),new 
r("logie",-1,3),new r("abile",-1,1),new r("ibile",-1,1),new r("usione",-1,4),new r("azione",-1,2),new r("uzione",-1,4),new r("atore",-1,2),new r("ose",-1,1),new r("ante",-1,1),new r("mente",-1,1),new r("amente",19,7),new r("iste",-1,1),new r("ive",-1,9),new r("anze",-1,1),new r("enze",-1,5),new r("ici",-1,1),new r("atrici",25,1),new r("ichi",-1,1),new r("abili",-1,1),new r("ibili",-1,1),new r("ismi",-1,1),new r("usioni",-1,4),new r("azioni",-1,2),new r("uzioni",-1,4),new r("atori",-1,2),new r("osi",-1,1),new r("anti",-1,1),new r("amenti",-1,6),new r("imenti",-1,6),new r("isti",-1,1),new r("ivi",-1,9),new r("ico",-1,1),new r("ismo",-1,1),new r("oso",-1,1),new r("amento",-1,6),new r("imento",-1,6),new r("ivo",-1,9),new r("ità",-1,8),new r("istà",-1,1),new r("istè",-1,1),new r("istì",-1,1)],W=[new r("isca",-1,1),new r("enda",-1,1),new r("ata",-1,1),new r("ita",-1,1),new r("uta",-1,1),new r("ava",-1,1),new r("eva",-1,1),new r("iva",-1,1),new r("erebbe",-1,1),new r("irebbe",-1,1),new r("isce",-1,1),new r("ende",-1,1),new r("are",-1,1),new r("ere",-1,1),new r("ire",-1,1),new r("asse",-1,1),new r("ate",-1,1),new r("avate",16,1),new r("evate",16,1),new r("ivate",16,1),new r("ete",-1,1),new r("erete",20,1),new r("irete",20,1),new r("ite",-1,1),new r("ereste",-1,1),new r("ireste",-1,1),new r("ute",-1,1),new r("erai",-1,1),new r("irai",-1,1),new r("isci",-1,1),new r("endi",-1,1),new r("erei",-1,1),new r("irei",-1,1),new r("assi",-1,1),new r("ati",-1,1),new r("iti",-1,1),new r("eresti",-1,1),new r("iresti",-1,1),new r("uti",-1,1),new r("avi",-1,1),new r("evi",-1,1),new r("ivi",-1,1),new r("isco",-1,1),new r("ando",-1,1),new r("endo",-1,1),new r("Yamo",-1,1),new r("iamo",-1,1),new r("avamo",-1,1),new r("evamo",-1,1),new r("ivamo",-1,1),new r("eremo",-1,1),new r("iremo",-1,1),new r("assimo",-1,1),new r("ammo",-1,1),new r("emmo",-1,1),new r("eremmo",54,1),new r("iremmo",54,1),new r("immo",-1,1),new r("ano",-1,1),new r("iscano",58,1),new r("avano",58,1),new r("evano",58,1),new 
r("ivano",58,1),new r("eranno",-1,1),new r("iranno",-1,1),new r("ono",-1,1),new r("iscono",65,1),new r("arono",65,1),new r("erono",65,1),new r("irono",65,1),new r("erebbero",-1,1),new r("irebbero",-1,1),new r("assero",-1,1),new r("essero",-1,1),new r("issero",-1,1),new r("ato",-1,1),new r("ito",-1,1),new r("uto",-1,1),new r("avo",-1,1),new r("evo",-1,1),new r("ivo",-1,1),new r("ar",-1,1),new r("ir",-1,1),new r("erà",-1,1),new r("irà",-1,1),new r("erò",-1,1),new r("irò",-1,1)],L=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,128,128,8,2,1],y=[17,65,0,0,0,0,0,0,0,0,0,0,0,0,0,128,128,8,2],U=[17],x=new n;this.setCurrent=function(e){x.setCurrent(e)},this.getCurrent=function(){return x.getCurrent()},this.stem=function(){var e=x.cursor;return i(),x.cursor=e,u(),x.limit_backward=e,x.cursor=x.limit,f(),x.cursor=x.limit,v()||(x.cursor=x.limit,b()),x.cursor=x.limit,_(),x.cursor=x.limit_backward,c(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return i.setCurrent(e),i.stem(),i.getCurrent()}):(i.setCurrent(e),i.stem(),i.getCurrent())}}(),e.Pipeline.registerFunction(e.it.stemmer,"stemmer-it"),e.it.stopWordFilter=e.generateStopWordFilter("a abbia abbiamo abbiano abbiate ad agl agli ai al all alla alle allo anche avemmo avendo avesse avessero avessi avessimo aveste avesti avete aveva avevamo avevano avevate avevi avevo avrai avranno avrebbe avrebbero avrei avremmo avremo avreste avresti avrete avrà avrò avuta avute avuti avuto c che chi ci coi col come con contro cui da dagl dagli dai dal dall dalla dalle dallo degl degli dei del dell della delle dello di dov dove e ebbe ebbero ebbi ed era erano eravamo eravate eri ero essendo faccia facciamo facciano facciate faccio facemmo facendo facesse facessero facessi facessimo faceste facesti faceva facevamo facevano facevate facevi facevo fai fanno farai faranno farebbe farebbero farei faremmo faremo fareste faresti farete farà farò fece fecero feci fosse fossero fossi fossimo foste fosti fu fui fummo furono gli ha 
hai hanno ho i il in io l la le lei li lo loro lui ma mi mia mie miei mio ne negl negli nei nel nell nella nelle nello noi non nostra nostre nostri nostro o per perché più quale quanta quante quanti quanto quella quelle quelli quello questa queste questi questo sarai saranno sarebbe sarebbero sarei saremmo saremo sareste saresti sarete sarà sarò se sei si sia siamo siano siate siete sono sta stai stando stanno starai staranno starebbe starebbero starei staremmo staremo stareste staresti starete starà starò stava stavamo stavano stavate stavi stavo stemmo stesse stessero stessi stessimo steste stesti stette stettero stetti stia stiamo stiano stiate sto su sua sue sugl sugli sui sul sull sulla sulle sullo suo suoi ti tra tu tua tue tuo tuoi tutti tutto un una uno vi voi vostra vostre vostri vostro è".split(" ")),e.Pipeline.registerFunction(e.it.stopWordFilter,"stopWordFilter-it")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.ja.min.js b/assets/javascripts/lunr/min/lunr.ja.min.js deleted file mode 100644 index 5f254ebe91fa..000000000000 --- a/assets/javascripts/lunr/min/lunr.ja.min.js +++ /dev/null @@ -1 +0,0 @@ -!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");var r="2"==e.version[0];e.ja=function(){this.pipeline.reset(),this.pipeline.add(e.ja.trimmer,e.ja.stopWordFilter,e.ja.stemmer),r?this.tokenizer=e.ja.tokenizer:(e.tokenizer&&(e.tokenizer=e.ja.tokenizer),this.tokenizerFn&&(this.tokenizerFn=e.ja.tokenizer))};var t=new e.TinySegmenter;e.ja.tokenizer=function(i){var n,o,s,p,a,u,m,l,c,f;if(!arguments.length||null==i||void 0==i)return[];if(Array.isArray(i))return i.map(function(t){return r?new e.Token(t.toLowerCase()):t.toLowerCase()});for(o=i.toString().toLowerCase().replace(/^\s+/,""),n=o.length-1;n>=0;n--)if(/\S/.test(o.charAt(n))){o=o.substring(0,n+1);break}for(a=[],s=o.length,c=0,l=0;c<=s;c++)if(u=o.charAt(c),m=c-l,u.match(/\s/)||c==s){if(m>0)for(p=t.segment(o.slice(l,c)).filter(function(e){return!!e}),f=l,n=0;n=C.limit)break;C.cursor++;continue}break}for(C.cursor=o,C.bra=o,C.eq_s(1,"y")?(C.ket=C.cursor,C.slice_from("Y")):C.cursor=o;;)if(e=C.cursor,C.in_grouping(q,97,232)){if(i=C.cursor,C.bra=i,C.eq_s(1,"i"))C.ket=C.cursor,C.in_grouping(q,97,232)&&(C.slice_from("I"),C.cursor=e);else if(C.cursor=i,C.eq_s(1,"y"))C.ket=C.cursor,C.slice_from("Y"),C.cursor=e;else if(n(e))break}else if(n(e))break}function n(r){return C.cursor=r,r>=C.limit||(C.cursor++,!1)}function o(){_=C.limit,d=_,t()||(_=C.cursor,_<3&&(_=3),t()||(d=C.cursor))}function t(){for(;!C.in_grouping(q,97,232);){if(C.cursor>=C.limit)return!0;C.cursor++}for(;!C.out_grouping(q,97,232);){if(C.cursor>=C.limit)return!0;C.cursor++}return!1}function s(){for(var r;;)if(C.bra=C.cursor,r=C.find_among(p,3))switch(C.ket=C.cursor,r){case 1:C.slice_from("y");break;case 2:C.slice_from("i");break;case 3:if(C.cursor>=C.limit)return;C.cursor++}}function u(){return _<=C.cursor}function c(){return d<=C.cursor}function a(){var r=C.limit-C.cursor;C.find_among_b(g,3)&&(C.cursor=C.limit-r,C.ket=C.cursor,C.cursor>C.limit_backward&&(C.cursor--,C.bra=C.cursor,C.slice_del()))}function l(){var 
r;w=!1,C.ket=C.cursor,C.eq_s_b(1,"e")&&(C.bra=C.cursor,u()&&(r=C.limit-C.cursor,C.out_grouping_b(q,97,232)&&(C.cursor=C.limit-r,C.slice_del(),w=!0,a())))}function m(){var r;u()&&(r=C.limit-C.cursor,C.out_grouping_b(q,97,232)&&(C.cursor=C.limit-r,C.eq_s_b(3,"gem")||(C.cursor=C.limit-r,C.slice_del(),a())))}function f(){var r,e,i,n,o,t,s=C.limit-C.cursor;if(C.ket=C.cursor,r=C.find_among_b(h,5))switch(C.bra=C.cursor,r){case 1:u()&&C.slice_from("heid");break;case 2:m();break;case 3:u()&&C.out_grouping_b(j,97,232)&&C.slice_del()}if(C.cursor=C.limit-s,l(),C.cursor=C.limit-s,C.ket=C.cursor,C.eq_s_b(4,"heid")&&(C.bra=C.cursor,c()&&(e=C.limit-C.cursor,C.eq_s_b(1,"c")||(C.cursor=C.limit-e,C.slice_del(),C.ket=C.cursor,C.eq_s_b(2,"en")&&(C.bra=C.cursor,m())))),C.cursor=C.limit-s,C.ket=C.cursor,r=C.find_among_b(k,6))switch(C.bra=C.cursor,r){case 1:if(c()){if(C.slice_del(),i=C.limit-C.cursor,C.ket=C.cursor,C.eq_s_b(2,"ig")&&(C.bra=C.cursor,c()&&(n=C.limit-C.cursor,!C.eq_s_b(1,"e")))){C.cursor=C.limit-n,C.slice_del();break}C.cursor=C.limit-i,a()}break;case 2:c()&&(o=C.limit-C.cursor,C.eq_s_b(1,"e")||(C.cursor=C.limit-o,C.slice_del()));break;case 3:c()&&(C.slice_del(),l());break;case 4:c()&&C.slice_del();break;case 5:c()&&w&&C.slice_del()}C.cursor=C.limit-s,C.out_grouping_b(z,73,232)&&(t=C.limit-C.cursor,C.find_among_b(v,4)&&C.out_grouping_b(q,97,232)&&(C.cursor=C.limit-t,C.ket=C.cursor,C.cursor>C.limit_backward&&(C.cursor--,C.bra=C.cursor,C.slice_del())))}var d,_,w,b=[new e("",-1,6),new e("á",0,1),new e("ä",0,1),new e("é",0,2),new e("ë",0,2),new e("í",0,3),new e("ï",0,3),new e("ó",0,4),new e("ö",0,4),new e("ú",0,5),new e("ü",0,5)],p=[new e("",-1,3),new e("I",0,2),new e("Y",0,1)],g=[new e("dd",-1,-1),new e("kk",-1,-1),new e("tt",-1,-1)],h=[new e("ene",-1,2),new e("se",-1,3),new e("en",-1,2),new e("heden",2,1),new e("s",-1,3)],k=[new e("end",-1,1),new e("ig",-1,2),new e("ing",-1,1),new e("lijk",-1,3),new e("baar",-1,4),new e("bar",-1,5)],v=[new e("aa",-1,-1),new e("ee",-1,-1),new 
e("oo",-1,-1),new e("uu",-1,-1)],q=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,128],z=[1,0,0,17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,128],j=[17,67,16,1,0,0,0,0,0,0,0,0,0,0,0,0,128],C=new i;this.setCurrent=function(r){C.setCurrent(r)},this.getCurrent=function(){return C.getCurrent()},this.stem=function(){var e=C.cursor;return r(),C.cursor=e,o(),C.limit_backward=e,C.cursor=C.limit,f(),C.cursor=C.limit_backward,s(),!0}};return function(r){return"function"==typeof r.update?r.update(function(r){return n.setCurrent(r),n.stem(),n.getCurrent()}):(n.setCurrent(r),n.stem(),n.getCurrent())}}(),r.Pipeline.registerFunction(r.nl.stemmer,"stemmer-nl"),r.nl.stopWordFilter=r.generateStopWordFilter(" aan al alles als altijd andere ben bij daar dan dat de der deze die dit doch doen door dus een eens en er ge geen geweest haar had heb hebben heeft hem het hier hij hoe hun iemand iets ik in is ja je kan kon kunnen maar me meer men met mij mijn moet na naar niet niets nog nu of om omdat onder ons ook op over reeds te tegen toch toen tot u uit uw van veel voor want waren was wat werd wezen wie wil worden wordt zal ze zelf zich zij zijn zo zonder zou".split(" ")),r.Pipeline.registerFunction(r.nl.stopWordFilter,"stopWordFilter-nl")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.no.min.js b/assets/javascripts/lunr/min/lunr.no.min.js deleted file mode 100644 index 92bc7e4e8944..000000000000 --- a/assets/javascripts/lunr/min/lunr.no.min.js +++ /dev/null @@ -1,18 +0,0 @@ -/*! - * Lunr languages, `Norwegian` language - * https://github.com/MihaiValentin/lunr-languages - * - * Copyright 2014, Mihai Valentin - * http://www.mozilla.org/MPL/ - */ -/*! 
- * based on - * Snowball JavaScript Library v0.3 - * http://code.google.com/p/urim/ - * http://snowball.tartarus.org/ - * - * Copyright 2010, Oleg Mazko - * http://www.mozilla.org/MPL/ - */ - -!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.no=function(){this.pipeline.reset(),this.pipeline.add(e.no.trimmer,e.no.stopWordFilter,e.no.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.no.stemmer))},e.no.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.no.trimmer=e.trimmerSupport.generateTrimmer(e.no.wordCharacters),e.Pipeline.registerFunction(e.no.trimmer,"trimmer-no"),e.no.stemmer=function(){var r=e.stemmerSupport.Among,n=e.stemmerSupport.SnowballProgram,i=new function(){function e(){var e,r=w.cursor+3;if(a=w.limit,0<=r||r<=w.limit){for(s=r;;){if(e=w.cursor,w.in_grouping(d,97,248)){w.cursor=e;break}if(e>=w.limit)return;w.cursor=e+1}for(;!w.out_grouping(d,97,248);){if(w.cursor>=w.limit)return;w.cursor++}a=w.cursor,a=a&&(r=w.limit_backward,w.limit_backward=a,w.ket=w.cursor,e=w.find_among_b(m,29),w.limit_backward=r,e))switch(w.bra=w.cursor,e){case 1:w.slice_del();break;case 2:n=w.limit-w.cursor,w.in_grouping_b(c,98,122)?w.slice_del():(w.cursor=w.limit-n,w.eq_s_b(1,"k")&&w.out_grouping_b(d,97,248)&&w.slice_del());break;case 3:w.slice_from("er")}}function t(){var e,r=w.limit-w.cursor;w.cursor>=a&&(e=w.limit_backward,w.limit_backward=a,w.ket=w.cursor,w.find_among_b(u,2)?(w.bra=w.cursor,w.limit_backward=e,w.cursor=w.limit-r,w.cursor>w.limit_backward&&(w.cursor--,w.bra=w.cursor,w.slice_del())):w.limit_backward=e)}function o(){var 
e,r;w.cursor>=a&&(r=w.limit_backward,w.limit_backward=a,w.ket=w.cursor,e=w.find_among_b(l,11),e?(w.bra=w.cursor,w.limit_backward=r,1==e&&w.slice_del()):w.limit_backward=r)}var s,a,m=[new r("a",-1,1),new r("e",-1,1),new r("ede",1,1),new r("ande",1,1),new r("ende",1,1),new r("ane",1,1),new r("ene",1,1),new r("hetene",6,1),new r("erte",1,3),new r("en",-1,1),new r("heten",9,1),new r("ar",-1,1),new r("er",-1,1),new r("heter",12,1),new r("s",-1,2),new r("as",14,1),new r("es",14,1),new r("edes",16,1),new r("endes",16,1),new r("enes",16,1),new r("hetenes",19,1),new r("ens",14,1),new r("hetens",21,1),new r("ers",14,1),new r("ets",14,1),new r("et",-1,1),new r("het",25,1),new r("ert",-1,3),new r("ast",-1,1)],u=[new r("dt",-1,-1),new r("vt",-1,-1)],l=[new r("leg",-1,1),new r("eleg",0,1),new r("ig",-1,1),new r("eig",2,1),new r("lig",2,1),new r("elig",4,1),new r("els",-1,1),new r("lov",-1,1),new r("elov",7,1),new r("slov",7,1),new r("hetslov",9,1)],d=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,48,0,128],c=[119,125,149,1],w=new n;this.setCurrent=function(e){w.setCurrent(e)},this.getCurrent=function(){return w.getCurrent()},this.stem=function(){var r=w.cursor;return e(),w.limit_backward=r,w.cursor=w.limit,i(),w.cursor=w.limit,t(),w.cursor=w.limit,o(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return i.setCurrent(e),i.stem(),i.getCurrent()}):(i.setCurrent(e),i.stem(),i.getCurrent())}}(),e.Pipeline.registerFunction(e.no.stemmer,"stemmer-no"),e.no.stopWordFilter=e.generateStopWordFilter("alle at av bare begge ble blei bli blir blitt både båe da de deg dei deim deira deires dem den denne der dere deres det dette di din disse ditt du dykk dykkar då eg ein eit eitt eller elles en enn er et ett etter for fordi fra før ha hadde han hans har hennar henne hennes her hjå ho hoe honom hoss hossen hun hva hvem hver hvilke hvilken hvis hvor hvordan hvorfor i ikke ikkje ikkje ingen ingi inkje inn inni ja jeg kan kom korleis korso kun kunne kva kvar kvarhelst kven kvi 
kvifor man mange me med medan meg meget mellom men mi min mine mitt mot mykje ned no noe noen noka noko nokon nokor nokre nå når og også om opp oss over på samme seg selv si si sia sidan siden sin sine sitt sjøl skal skulle slik so som som somme somt så sånn til um upp ut uten var vart varte ved vere verte vi vil ville vore vors vort vår være være vært å".split(" ")),e.Pipeline.registerFunction(e.no.stopWordFilter,"stopWordFilter-no")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.pt.min.js b/assets/javascripts/lunr/min/lunr.pt.min.js deleted file mode 100644 index 6c16996d6509..000000000000 --- a/assets/javascripts/lunr/min/lunr.pt.min.js +++ /dev/null @@ -1,18 +0,0 @@ -/*! - * Lunr languages, `Portuguese` language - * https://github.com/MihaiValentin/lunr-languages - * - * Copyright 2014, Mihai Valentin - * http://www.mozilla.org/MPL/ - */ -/*! - * based on - * Snowball JavaScript Library v0.3 - * http://code.google.com/p/urim/ - * http://snowball.tartarus.org/ - * - * Copyright 2010, Oleg Mazko - * http://www.mozilla.org/MPL/ - */ - -!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.pt=function(){this.pipeline.reset(),this.pipeline.add(e.pt.trimmer,e.pt.stopWordFilter,e.pt.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.pt.stemmer))},e.pt.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.pt.trimmer=e.trimmerSupport.generateTrimmer(e.pt.wordCharacters),e.Pipeline.registerFunction(e.pt.trimmer,"trimmer-pt"),e.pt.stemmer=function(){var r=e.stemmerSupport.Among,s=e.stemmerSupport.SnowballProgram,n=new function(){function e(){for(var e;;){if(z.bra=z.cursor,e=z.find_among(k,3))switch(z.ket=z.cursor,e){case 1:z.slice_from("a~");continue;case 2:z.slice_from("o~");continue;case 3:if(z.cursor>=z.limit)break;z.cursor++;continue}break}}function n(){if(z.out_grouping(y,97,250)){for(;!z.in_grouping(y,97,250);){if(z.cursor>=z.limit)return!0;z.cursor++}return!1}return!0}function i(){if(z.in_grouping(y,97,250))for(;!z.out_grouping(y,97,250);){if(z.cursor>=z.limit)return!1;z.cursor++}return g=z.cursor,!0}function o(){var e,r,s=z.cursor;if(z.in_grouping(y,97,250))if(e=z.cursor,n()){if(z.cursor=e,i())return}else g=z.cursor;if(z.cursor=s,z.out_grouping(y,97,250)){if(r=z.cursor,n()){if(z.cursor=r,!z.in_grouping(y,97,250)||z.cursor>=z.limit)return;z.cursor++}g=z.cursor}}function t(){for(;!z.in_grouping(y,97,250);){if(z.cursor>=z.limit)return!1;z.cursor++}for(;!z.out_grouping(y,97,250);){if(z.cursor>=z.limit)return!1;z.cursor++}return!0}function a(){var e=z.cursor;g=z.limit,b=g,h=g,o(),z.cursor=e,t()&&(b=z.cursor,t()&&(h=z.cursor))}function u(){for(var e;;){if(z.bra=z.cursor,e=z.find_among(q,3))switch(z.ket=z.cursor,e){case 1:z.slice_from("ã");continue;case 2:z.slice_from("õ");continue;case 3:if(z.cursor>=z.limit)break;z.cursor++;continue}break}}function w(){return g<=z.cursor}function m(){return b<=z.cursor}function c(){return h<=z.cursor}function l(){var 
e;if(z.ket=z.cursor,!(e=z.find_among_b(F,45)))return!1;switch(z.bra=z.cursor,e){case 1:if(!c())return!1;z.slice_del();break;case 2:if(!c())return!1;z.slice_from("log");break;case 3:if(!c())return!1;z.slice_from("u");break;case 4:if(!c())return!1;z.slice_from("ente");break;case 5:if(!m())return!1;z.slice_del(),z.ket=z.cursor,e=z.find_among_b(j,4),e&&(z.bra=z.cursor,c()&&(z.slice_del(),1==e&&(z.ket=z.cursor,z.eq_s_b(2,"at")&&(z.bra=z.cursor,c()&&z.slice_del()))));break;case 6:if(!c())return!1;z.slice_del(),z.ket=z.cursor,e=z.find_among_b(C,3),e&&(z.bra=z.cursor,1==e&&c()&&z.slice_del());break;case 7:if(!c())return!1;z.slice_del(),z.ket=z.cursor,e=z.find_among_b(P,3),e&&(z.bra=z.cursor,1==e&&c()&&z.slice_del());break;case 8:if(!c())return!1;z.slice_del(),z.ket=z.cursor,z.eq_s_b(2,"at")&&(z.bra=z.cursor,c()&&z.slice_del());break;case 9:if(!w()||!z.eq_s_b(1,"e"))return!1;z.slice_from("ir")}return!0}function f(){var e,r;if(z.cursor>=g){if(r=z.limit_backward,z.limit_backward=g,z.ket=z.cursor,e=z.find_among_b(S,120))return z.bra=z.cursor,1==e&&z.slice_del(),z.limit_backward=r,!0;z.limit_backward=r}return!1}function d(){var e;z.ket=z.cursor,(e=z.find_among_b(W,7))&&(z.bra=z.cursor,1==e&&w()&&z.slice_del())}function v(e,r){if(z.eq_s_b(1,e)){z.bra=z.cursor;var s=z.limit-z.cursor;if(z.eq_s_b(1,r))return z.cursor=z.limit-s,w()&&z.slice_del(),!1}return!0}function p(){var e;if(z.ket=z.cursor,e=z.find_among_b(L,4))switch(z.bra=z.cursor,e){case 1:w()&&(z.slice_del(),z.ket=z.cursor,z.limit-z.cursor,v("u","g")&&v("i","c"));break;case 2:z.slice_from("c")}}function _(){if(!l()&&(z.cursor=z.limit,!f()))return z.cursor=z.limit,void d();z.cursor=z.limit,z.ket=z.cursor,z.eq_s_b(1,"i")&&(z.bra=z.cursor,z.eq_s_b(1,"c")&&(z.cursor=z.limit,w()&&z.slice_del()))}var h,b,g,k=[new r("",-1,3),new r("ã",0,1),new r("õ",0,2)],q=[new r("",-1,3),new r("a~",0,1),new r("o~",0,2)],j=[new r("ic",-1,-1),new r("ad",-1,-1),new r("os",-1,-1),new r("iv",-1,1)],C=[new r("ante",-1,1),new r("avel",-1,1),new 
r("ível",-1,1)],P=[new r("ic",-1,1),new r("abil",-1,1),new r("iv",-1,1)],F=[new r("ica",-1,1),new r("ância",-1,1),new r("ência",-1,4),new r("ira",-1,9),new r("adora",-1,1),new r("osa",-1,1),new r("ista",-1,1),new r("iva",-1,8),new r("eza",-1,1),new r("logía",-1,2),new r("idade",-1,7),new r("ante",-1,1),new r("mente",-1,6),new r("amente",12,5),new r("ável",-1,1),new r("ível",-1,1),new r("ución",-1,3),new r("ico",-1,1),new r("ismo",-1,1),new r("oso",-1,1),new r("amento",-1,1),new r("imento",-1,1),new r("ivo",-1,8),new r("aça~o",-1,1),new r("ador",-1,1),new r("icas",-1,1),new r("ências",-1,4),new r("iras",-1,9),new r("adoras",-1,1),new r("osas",-1,1),new r("istas",-1,1),new r("ivas",-1,8),new r("ezas",-1,1),new r("logías",-1,2),new r("idades",-1,7),new r("uciones",-1,3),new r("adores",-1,1),new r("antes",-1,1),new r("aço~es",-1,1),new r("icos",-1,1),new r("ismos",-1,1),new r("osos",-1,1),new r("amentos",-1,1),new r("imentos",-1,1),new r("ivos",-1,8)],S=[new r("ada",-1,1),new r("ida",-1,1),new r("ia",-1,1),new r("aria",2,1),new r("eria",2,1),new r("iria",2,1),new r("ara",-1,1),new r("era",-1,1),new r("ira",-1,1),new r("ava",-1,1),new r("asse",-1,1),new r("esse",-1,1),new r("isse",-1,1),new r("aste",-1,1),new r("este",-1,1),new r("iste",-1,1),new r("ei",-1,1),new r("arei",16,1),new r("erei",16,1),new r("irei",16,1),new r("am",-1,1),new r("iam",20,1),new r("ariam",21,1),new r("eriam",21,1),new r("iriam",21,1),new r("aram",20,1),new r("eram",20,1),new r("iram",20,1),new r("avam",20,1),new r("em",-1,1),new r("arem",29,1),new r("erem",29,1),new r("irem",29,1),new r("assem",29,1),new r("essem",29,1),new r("issem",29,1),new r("ado",-1,1),new r("ido",-1,1),new r("ando",-1,1),new r("endo",-1,1),new r("indo",-1,1),new r("ara~o",-1,1),new r("era~o",-1,1),new r("ira~o",-1,1),new r("ar",-1,1),new r("er",-1,1),new r("ir",-1,1),new r("as",-1,1),new r("adas",47,1),new r("idas",47,1),new r("ias",47,1),new r("arias",50,1),new r("erias",50,1),new r("irias",50,1),new r("aras",47,1),new 
r("eras",47,1),new r("iras",47,1),new r("avas",47,1),new r("es",-1,1),new r("ardes",58,1),new r("erdes",58,1),new r("irdes",58,1),new r("ares",58,1),new r("eres",58,1),new r("ires",58,1),new r("asses",58,1),new r("esses",58,1),new r("isses",58,1),new r("astes",58,1),new r("estes",58,1),new r("istes",58,1),new r("is",-1,1),new r("ais",71,1),new r("eis",71,1),new r("areis",73,1),new r("ereis",73,1),new r("ireis",73,1),new r("áreis",73,1),new r("éreis",73,1),new r("íreis",73,1),new r("ásseis",73,1),new r("ésseis",73,1),new r("ísseis",73,1),new r("áveis",73,1),new r("íeis",73,1),new r("aríeis",84,1),new r("eríeis",84,1),new r("iríeis",84,1),new r("ados",-1,1),new r("idos",-1,1),new r("amos",-1,1),new r("áramos",90,1),new r("éramos",90,1),new r("íramos",90,1),new r("ávamos",90,1),new r("íamos",90,1),new r("aríamos",95,1),new r("eríamos",95,1),new r("iríamos",95,1),new r("emos",-1,1),new r("aremos",99,1),new r("eremos",99,1),new r("iremos",99,1),new r("ássemos",99,1),new r("êssemos",99,1),new r("íssemos",99,1),new r("imos",-1,1),new r("armos",-1,1),new r("ermos",-1,1),new r("irmos",-1,1),new r("ámos",-1,1),new r("arás",-1,1),new r("erás",-1,1),new r("irás",-1,1),new r("eu",-1,1),new r("iu",-1,1),new r("ou",-1,1),new r("ará",-1,1),new r("erá",-1,1),new r("irá",-1,1)],W=[new r("a",-1,1),new r("i",-1,1),new r("o",-1,1),new r("os",-1,1),new r("á",-1,1),new r("í",-1,1),new r("ó",-1,1)],L=[new r("e",-1,1),new r("ç",-1,2),new r("é",-1,1),new r("ê",-1,1)],y=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,3,19,12,2],z=new s;this.setCurrent=function(e){z.setCurrent(e)},this.getCurrent=function(){return z.getCurrent()},this.stem=function(){var r=z.cursor;return e(),z.cursor=r,a(),z.limit_backward=r,z.cursor=z.limit,_(),z.cursor=z.limit,p(),z.cursor=z.limit_backward,u(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return 
n.setCurrent(e),n.stem(),n.getCurrent()}):(n.setCurrent(e),n.stem(),n.getCurrent())}}(),e.Pipeline.registerFunction(e.pt.stemmer,"stemmer-pt"),e.pt.stopWordFilter=e.generateStopWordFilter("a ao aos aquela aquelas aquele aqueles aquilo as até com como da das de dela delas dele deles depois do dos e ela elas ele eles em entre era eram essa essas esse esses esta estamos estas estava estavam este esteja estejam estejamos estes esteve estive estivemos estiver estivera estiveram estiverem estivermos estivesse estivessem estivéramos estivéssemos estou está estávamos estão eu foi fomos for fora foram forem formos fosse fossem fui fôramos fôssemos haja hajam hajamos havemos hei houve houvemos houver houvera houveram houverei houverem houveremos houveria houveriam houvermos houverá houverão houveríamos houvesse houvessem houvéramos houvéssemos há hão isso isto já lhe lhes mais mas me mesmo meu meus minha minhas muito na nas nem no nos nossa nossas nosso nossos num numa não nós o os ou para pela pelas pelo pelos por qual quando que quem se seja sejam sejamos sem serei seremos seria seriam será serão seríamos seu seus somos sou sua suas são só também te tem temos tenha tenham tenhamos tenho terei teremos teria teriam terá terão teríamos teu teus teve tinha tinham tive tivemos tiver tivera tiveram tiverem tivermos tivesse tivessem tivéramos tivéssemos tu tua tuas tém tínhamos um uma você vocês vos à às éramos".split(" ")),e.Pipeline.registerFunction(e.pt.stopWordFilter,"stopWordFilter-pt")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.ro.min.js b/assets/javascripts/lunr/min/lunr.ro.min.js deleted file mode 100644 index 727714018182..000000000000 --- a/assets/javascripts/lunr/min/lunr.ro.min.js +++ /dev/null @@ -1,18 +0,0 @@ -/*! - * Lunr languages, `Romanian` language - * https://github.com/MihaiValentin/lunr-languages - * - * Copyright 2014, Mihai Valentin - * http://www.mozilla.org/MPL/ - */ -/*! 
- * based on - * Snowball JavaScript Library v0.3 - * http://code.google.com/p/urim/ - * http://snowball.tartarus.org/ - * - * Copyright 2010, Oleg Mazko - * http://www.mozilla.org/MPL/ - */ - -!function(e,i){"function"==typeof define&&define.amd?define(i):"object"==typeof exports?module.exports=i():i()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.ro=function(){this.pipeline.reset(),this.pipeline.add(e.ro.trimmer,e.ro.stopWordFilter,e.ro.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.ro.stemmer))},e.ro.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.ro.trimmer=e.trimmerSupport.generateTrimmer(e.ro.wordCharacters),e.Pipeline.registerFunction(e.ro.trimmer,"trimmer-ro"),e.ro.stemmer=function(){var i=e.stemmerSupport.Among,r=e.stemmerSupport.SnowballProgram,n=new function(){function e(e,i){L.eq_s(1,e)&&(L.ket=L.cursor,L.in_grouping(W,97,259)&&L.slice_from(i))}function n(){for(var i,r;;){if(i=L.cursor,L.in_grouping(W,97,259)&&(r=L.cursor,L.bra=r,e("u","U"),L.cursor=r,e("i","I")),L.cursor=i,L.cursor>=L.limit)break;L.cursor++}}function t(){if(L.out_grouping(W,97,259)){for(;!L.in_grouping(W,97,259);){if(L.cursor>=L.limit)return!0;L.cursor++}return!1}return!0}function a(){if(L.in_grouping(W,97,259))for(;!L.out_grouping(W,97,259);){if(L.cursor>=L.limit)return!0;L.cursor++}return!1}function o(){var e,i,r=L.cursor;if(L.in_grouping(W,97,259)){if(e=L.cursor,!t())return void(h=L.cursor);if(L.cursor=e,!a())return 
void(h=L.cursor)}L.cursor=r,L.out_grouping(W,97,259)&&(i=L.cursor,t()&&(L.cursor=i,L.in_grouping(W,97,259)&&L.cursor=L.limit)return!1;L.cursor++}for(;!L.out_grouping(W,97,259);){if(L.cursor>=L.limit)return!1;L.cursor++}return!0}function c(){var e=L.cursor;h=L.limit,k=h,g=h,o(),L.cursor=e,u()&&(k=L.cursor,u()&&(g=L.cursor))}function s(){for(var e;;){if(L.bra=L.cursor,e=L.find_among(z,3))switch(L.ket=L.cursor,e){case 1:L.slice_from("i");continue;case 2:L.slice_from("u");continue;case 3:if(L.cursor>=L.limit)break;L.cursor++;continue}break}}function w(){return h<=L.cursor}function m(){return k<=L.cursor}function l(){return g<=L.cursor}function f(){var e,i;if(L.ket=L.cursor,(e=L.find_among_b(C,16))&&(L.bra=L.cursor,m()))switch(e){case 1:L.slice_del();break;case 2:L.slice_from("a");break;case 3:L.slice_from("e");break;case 4:L.slice_from("i");break;case 5:i=L.limit-L.cursor,L.eq_s_b(2,"ab")||(L.cursor=L.limit-i,L.slice_from("i"));break;case 6:L.slice_from("at");break;case 7:L.slice_from("aţi")}}function p(){var e,i=L.limit-L.cursor;if(L.ket=L.cursor,(e=L.find_among_b(P,46))&&(L.bra=L.cursor,m())){switch(e){case 1:L.slice_from("abil");break;case 2:L.slice_from("ibil");break;case 3:L.slice_from("iv");break;case 4:L.slice_from("ic");break;case 5:L.slice_from("at");break;case 6:L.slice_from("it")}return _=!0,L.cursor=L.limit-i,!0}return!1}function d(){var e,i;for(_=!1;;)if(i=L.limit-L.cursor,!p()){L.cursor=L.limit-i;break}if(L.ket=L.cursor,(e=L.find_among_b(F,62))&&(L.bra=L.cursor,l())){switch(e){case 1:L.slice_del();break;case 2:L.eq_s_b(1,"ţ")&&(L.bra=L.cursor,L.slice_from("t"));break;case 3:L.slice_from("ist")}_=!0}}function b(){var e,i,r;if(L.cursor>=h){if(i=L.limit_backward,L.limit_backward=h,L.ket=L.cursor,e=L.find_among_b(q,94))switch(L.bra=L.cursor,e){case 1:if(r=L.limit-L.cursor,!L.out_grouping_b(W,97,259)&&(L.cursor=L.limit-r,!L.eq_s_b(1,"u")))break;case 2:L.slice_del()}L.limit_backward=i}}function v(){var 
e;L.ket=L.cursor,(e=L.find_among_b(S,5))&&(L.bra=L.cursor,w()&&1==e&&L.slice_del())}var _,g,k,h,z=[new i("",-1,3),new i("I",0,1),new i("U",0,2)],C=[new i("ea",-1,3),new i("aţia",-1,7),new i("aua",-1,2),new i("iua",-1,4),new i("aţie",-1,7),new i("ele",-1,3),new i("ile",-1,5),new i("iile",6,4),new i("iei",-1,4),new i("atei",-1,6),new i("ii",-1,4),new i("ului",-1,1),new i("ul",-1,1),new i("elor",-1,3),new i("ilor",-1,4),new i("iilor",14,4)],P=[new i("icala",-1,4),new i("iciva",-1,4),new i("ativa",-1,5),new i("itiva",-1,6),new i("icale",-1,4),new i("aţiune",-1,5),new i("iţiune",-1,6),new i("atoare",-1,5),new i("itoare",-1,6),new i("ătoare",-1,5),new i("icitate",-1,4),new i("abilitate",-1,1),new i("ibilitate",-1,2),new i("ivitate",-1,3),new i("icive",-1,4),new i("ative",-1,5),new i("itive",-1,6),new i("icali",-1,4),new i("atori",-1,5),new i("icatori",18,4),new i("itori",-1,6),new i("ători",-1,5),new i("icitati",-1,4),new i("abilitati",-1,1),new i("ivitati",-1,3),new i("icivi",-1,4),new i("ativi",-1,5),new i("itivi",-1,6),new i("icităi",-1,4),new i("abilităi",-1,1),new i("ivităi",-1,3),new i("icităţi",-1,4),new i("abilităţi",-1,1),new i("ivităţi",-1,3),new i("ical",-1,4),new i("ator",-1,5),new i("icator",35,4),new i("itor",-1,6),new i("ător",-1,5),new i("iciv",-1,4),new i("ativ",-1,5),new i("itiv",-1,6),new i("icală",-1,4),new i("icivă",-1,4),new i("ativă",-1,5),new i("itivă",-1,6)],F=[new i("ica",-1,1),new i("abila",-1,1),new i("ibila",-1,1),new i("oasa",-1,1),new i("ata",-1,1),new i("ita",-1,1),new i("anta",-1,1),new i("ista",-1,3),new i("uta",-1,1),new i("iva",-1,1),new i("ic",-1,1),new i("ice",-1,1),new i("abile",-1,1),new i("ibile",-1,1),new i("isme",-1,3),new i("iune",-1,2),new i("oase",-1,1),new i("ate",-1,1),new i("itate",17,1),new i("ite",-1,1),new i("ante",-1,1),new i("iste",-1,3),new i("ute",-1,1),new i("ive",-1,1),new i("ici",-1,1),new i("abili",-1,1),new i("ibili",-1,1),new i("iuni",-1,2),new i("atori",-1,1),new i("osi",-1,1),new i("ati",-1,1),new 
i("itati",30,1),new i("iti",-1,1),new i("anti",-1,1),new i("isti",-1,3),new i("uti",-1,1),new i("işti",-1,3),new i("ivi",-1,1),new i("ităi",-1,1),new i("oşi",-1,1),new i("ităţi",-1,1),new i("abil",-1,1),new i("ibil",-1,1),new i("ism",-1,3),new i("ator",-1,1),new i("os",-1,1),new i("at",-1,1),new i("it",-1,1),new i("ant",-1,1),new i("ist",-1,3),new i("ut",-1,1),new i("iv",-1,1),new i("ică",-1,1),new i("abilă",-1,1),new i("ibilă",-1,1),new i("oasă",-1,1),new i("ată",-1,1),new i("ită",-1,1),new i("antă",-1,1),new i("istă",-1,3),new i("ută",-1,1),new i("ivă",-1,1)],q=[new i("ea",-1,1),new i("ia",-1,1),new i("esc",-1,1),new i("ăsc",-1,1),new i("ind",-1,1),new i("ând",-1,1),new i("are",-1,1),new i("ere",-1,1),new i("ire",-1,1),new i("âre",-1,1),new i("se",-1,2),new i("ase",10,1),new i("sese",10,2),new i("ise",10,1),new i("use",10,1),new i("âse",10,1),new i("eşte",-1,1),new i("ăşte",-1,1),new i("eze",-1,1),new i("ai",-1,1),new i("eai",19,1),new i("iai",19,1),new i("sei",-1,2),new i("eşti",-1,1),new i("ăşti",-1,1),new i("ui",-1,1),new i("ezi",-1,1),new i("âi",-1,1),new i("aşi",-1,1),new i("seşi",-1,2),new i("aseşi",29,1),new i("seseşi",29,2),new i("iseşi",29,1),new i("useşi",29,1),new i("âseşi",29,1),new i("işi",-1,1),new i("uşi",-1,1),new i("âşi",-1,1),new i("aţi",-1,2),new i("eaţi",38,1),new i("iaţi",38,1),new i("eţi",-1,2),new i("iţi",-1,2),new i("âţi",-1,2),new i("arăţi",-1,1),new i("serăţi",-1,2),new i("aserăţi",45,1),new i("seserăţi",45,2),new i("iserăţi",45,1),new i("userăţi",45,1),new i("âserăţi",45,1),new i("irăţi",-1,1),new i("urăţi",-1,1),new i("ârăţi",-1,1),new i("am",-1,1),new i("eam",54,1),new i("iam",54,1),new i("em",-1,2),new i("asem",57,1),new i("sesem",57,2),new i("isem",57,1),new i("usem",57,1),new i("âsem",57,1),new i("im",-1,2),new i("âm",-1,2),new i("ăm",-1,2),new i("arăm",65,1),new i("serăm",65,2),new i("aserăm",67,1),new i("seserăm",67,2),new i("iserăm",67,1),new i("userăm",67,1),new i("âserăm",67,1),new i("irăm",65,1),new i("urăm",65,1),new 
i("ârăm",65,1),new i("au",-1,1),new i("eau",76,1),new i("iau",76,1),new i("indu",-1,1),new i("ându",-1,1),new i("ez",-1,1),new i("ească",-1,1),new i("ară",-1,1),new i("seră",-1,2),new i("aseră",84,1),new i("seseră",84,2),new i("iseră",84,1),new i("useră",84,1),new i("âseră",84,1),new i("iră",-1,1),new i("ură",-1,1),new i("âră",-1,1),new i("ează",-1,1)],S=[new i("a",-1,1),new i("e",-1,1),new i("ie",1,1),new i("i",-1,1),new i("ă",-1,1)],W=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,2,32,0,0,4],L=new r;this.setCurrent=function(e){L.setCurrent(e)},this.getCurrent=function(){return L.getCurrent()},this.stem=function(){var e=L.cursor;return n(),L.cursor=e,c(),L.limit_backward=e,L.cursor=L.limit,f(),L.cursor=L.limit,d(),L.cursor=L.limit,_||(L.cursor=L.limit,b(),L.cursor=L.limit),v(),L.cursor=L.limit_backward,s(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return n.setCurrent(e),n.stem(),n.getCurrent()}):(n.setCurrent(e),n.stem(),n.getCurrent())}}(),e.Pipeline.registerFunction(e.ro.stemmer,"stemmer-ro"),e.ro.stopWordFilter=e.generateStopWordFilter("acea aceasta această aceea acei aceia acel acela acele acelea acest acesta aceste acestea aceşti aceştia acolo acord acum ai aia aibă aici al ale alea altceva altcineva am ar are asemenea asta astea astăzi asupra au avea avem aveţi azi aş aşadar aţi bine bucur bună ca care caut ce cel ceva chiar cinci cine cineva contra cu cum cumva curând curînd când cât câte câtva câţi cînd cît cîte cîtva cîţi că căci cărei căror cărui către da dacă dar datorită dată dau de deci deja deoarece departe deşi din dinaintea dintr- dintre doi doilea două drept după dă ea ei el ele eram este eu eşti face fata fi fie fiecare fii fim fiu fiţi frumos fără graţie halbă iar ieri la le li lor lui lângă lîngă mai mea mei mele mereu meu mi mie mine mult multă mulţi mulţumesc mâine mîine mă ne nevoie nici nicăieri nimeni nimeri nimic nişte noastre noastră noi noroc nostru nouă noştri nu opt ori oricare orice oricine oricum oricând 
oricât oricînd oricît oriunde patra patru patrulea pe pentru peste pic poate pot prea prima primul prin puţin puţina puţină până pînă rog sa sale sau se spate spre sub sunt suntem sunteţi sută sînt sîntem sînteţi să săi său ta tale te timp tine toate toată tot totuşi toţi trei treia treilea tu tăi tău un una unde undeva unei uneia unele uneori unii unor unora unu unui unuia unul vi voastre voastră voi vostru vouă voştri vreme vreo vreun vă zece zero zi zice îi îl îmi împotriva în înainte înaintea încotro încât încît între întrucât întrucît îţi ăla ălea ăsta ăstea ăştia şapte şase şi ştiu ţi ţie".split(" ")),e.Pipeline.registerFunction(e.ro.stopWordFilter,"stopWordFilter-ro")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.ru.min.js b/assets/javascripts/lunr/min/lunr.ru.min.js deleted file mode 100644 index 186cc485c238..000000000000 --- a/assets/javascripts/lunr/min/lunr.ru.min.js +++ /dev/null @@ -1,18 +0,0 @@ -/*! - * Lunr languages, `Russian` language - * https://github.com/MihaiValentin/lunr-languages - * - * Copyright 2014, Mihai Valentin - * http://www.mozilla.org/MPL/ - */ -/*! - * based on - * Snowball JavaScript Library v0.3 - * http://code.google.com/p/urim/ - * http://snowball.tartarus.org/ - * - * Copyright 2010, Oleg Mazko - * http://www.mozilla.org/MPL/ - */ - -!function(e,n){"function"==typeof define&&define.amd?define(n):"object"==typeof exports?module.exports=n():n()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.ru=function(){this.pipeline.reset(),this.pipeline.add(e.ru.trimmer,e.ru.stopWordFilter,e.ru.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.ru.stemmer))},e.ru.wordCharacters="Ѐ-҄҇-ԯᴫᵸⷠ-ⷿꙀ-ꚟ︮︯",e.ru.trimmer=e.trimmerSupport.generateTrimmer(e.ru.wordCharacters),e.Pipeline.registerFunction(e.ru.trimmer,"trimmer-ru"),e.ru.stemmer=function(){var n=e.stemmerSupport.Among,r=e.stemmerSupport.SnowballProgram,t=new function(){function e(){for(;!W.in_grouping(S,1072,1103);){if(W.cursor>=W.limit)return!1;W.cursor++}return!0}function t(){for(;!W.out_grouping(S,1072,1103);){if(W.cursor>=W.limit)return!1;W.cursor++}return!0}function w(){b=W.limit,_=b,e()&&(b=W.cursor,t()&&e()&&t()&&(_=W.cursor))}function i(){return _<=W.cursor}function u(e,n){var r,t;if(W.ket=W.cursor,r=W.find_among_b(e,n)){switch(W.bra=W.cursor,r){case 1:if(t=W.limit-W.cursor,!W.eq_s_b(1,"а")&&(W.cursor=W.limit-t,!W.eq_s_b(1,"я")))return!1;case 2:W.slice_del()}return!0}return!1}function o(){return u(h,9)}function s(e,n){var r;return W.ket=W.cursor,!!(r=W.find_among_b(e,n))&&(W.bra=W.cursor,1==r&&W.slice_del(),!0)}function c(){return s(g,26)}function m(){return!!c()&&(u(C,8),!0)}function f(){return s(k,2)}function l(){return u(P,46)}function a(){s(v,36)}function p(){var e;W.ket=W.cursor,(e=W.find_among_b(F,2))&&(W.bra=W.cursor,i()&&1==e&&W.slice_del())}function d(){var e;if(W.ket=W.cursor,e=W.find_among_b(q,4))switch(W.bra=W.cursor,e){case 1:if(W.slice_del(),W.ket=W.cursor,!W.eq_s_b(1,"н"))break;W.bra=W.cursor;case 2:if(!W.eq_s_b(1,"н"))break;case 3:W.slice_del()}}var _,b,h=[new n("в",-1,1),new n("ив",0,2),new n("ыв",0,2),new n("вши",-1,1),new n("ивши",3,2),new n("ывши",3,2),new n("вшись",-1,1),new n("ившись",6,2),new n("ывшись",6,2)],g=[new n("ее",-1,1),new n("ие",-1,1),new n("ое",-1,1),new n("ые",-1,1),new n("ими",-1,1),new n("ыми",-1,1),new n("ей",-1,1),new n("ий",-1,1),new n("ой",-1,1),new 
n("ый",-1,1),new n("ем",-1,1),new n("им",-1,1),new n("ом",-1,1),new n("ым",-1,1),new n("его",-1,1),new n("ого",-1,1),new n("ему",-1,1),new n("ому",-1,1),new n("их",-1,1),new n("ых",-1,1),new n("ею",-1,1),new n("ою",-1,1),new n("ую",-1,1),new n("юю",-1,1),new n("ая",-1,1),new n("яя",-1,1)],C=[new n("ем",-1,1),new n("нн",-1,1),new n("вш",-1,1),new n("ивш",2,2),new n("ывш",2,2),new n("щ",-1,1),new n("ющ",5,1),new n("ующ",6,2)],k=[new n("сь",-1,1),new n("ся",-1,1)],P=[new n("ла",-1,1),new n("ила",0,2),new n("ыла",0,2),new n("на",-1,1),new n("ена",3,2),new n("ете",-1,1),new n("ите",-1,2),new n("йте",-1,1),new n("ейте",7,2),new n("уйте",7,2),new n("ли",-1,1),new n("или",10,2),new n("ыли",10,2),new n("й",-1,1),new n("ей",13,2),new n("уй",13,2),new n("л",-1,1),new n("ил",16,2),new n("ыл",16,2),new n("ем",-1,1),new n("им",-1,2),new n("ым",-1,2),new n("н",-1,1),new n("ен",22,2),new n("ло",-1,1),new n("ило",24,2),new n("ыло",24,2),new n("но",-1,1),new n("ено",27,2),new n("нно",27,1),new n("ет",-1,1),new n("ует",30,2),new n("ит",-1,2),new n("ыт",-1,2),new n("ют",-1,1),new n("уют",34,2),new n("ят",-1,2),new n("ны",-1,1),new n("ены",37,2),new n("ть",-1,1),new n("ить",39,2),new n("ыть",39,2),new n("ешь",-1,1),new n("ишь",-1,2),new n("ю",-1,2),new n("ую",44,2)],v=[new n("а",-1,1),new n("ев",-1,1),new n("ов",-1,1),new n("е",-1,1),new n("ие",3,1),new n("ье",3,1),new n("и",-1,1),new n("еи",6,1),new n("ии",6,1),new n("ами",6,1),new n("ями",6,1),new n("иями",10,1),new n("й",-1,1),new n("ей",12,1),new n("ией",13,1),new n("ий",12,1),new n("ой",12,1),new n("ам",-1,1),new n("ем",-1,1),new n("ием",18,1),new n("ом",-1,1),new n("ям",-1,1),new n("иям",21,1),new n("о",-1,1),new n("у",-1,1),new n("ах",-1,1),new n("ях",-1,1),new n("иях",26,1),new n("ы",-1,1),new n("ь",-1,1),new n("ю",-1,1),new n("ию",30,1),new n("ью",30,1),new n("я",-1,1),new n("ия",33,1),new n("ья",33,1)],F=[new n("ост",-1,1),new n("ость",-1,1)],q=[new n("ейше",-1,1),new n("н",-1,2),new n("ейш",-1,1),new 
n("ь",-1,3)],S=[33,65,8,232],W=new r;this.setCurrent=function(e){W.setCurrent(e)},this.getCurrent=function(){return W.getCurrent()},this.stem=function(){return w(),W.cursor=W.limit,!(W.cursor=i&&(e-=i,t[e>>3]&1<<(7&e)))return this.cursor++,!0}return!1},in_grouping_b:function(t,i,s){if(this.cursor>this.limit_backward){var e=r.charCodeAt(this.cursor-1);if(e<=s&&e>=i&&(e-=i,t[e>>3]&1<<(7&e)))return this.cursor--,!0}return!1},out_grouping:function(t,i,s){if(this.cursors||e>3]&1<<(7&e)))return this.cursor++,!0}return!1},out_grouping_b:function(t,i,s){if(this.cursor>this.limit_backward){var e=r.charCodeAt(this.cursor-1);if(e>s||e>3]&1<<(7&e)))return this.cursor--,!0}return!1},eq_s:function(t,i){if(this.limit-this.cursor>1),f=0,l=o0||e==s||c)break;c=!0}}for(;;){var _=t[s];if(o>=_.s_size){if(this.cursor=n+_.s_size,!_.method)return _.result;var b=_.method();if(this.cursor=n+_.s_size,b)return _.result}if((s=_.substring_i)<0)return 0}},find_among_b:function(t,i){for(var s=0,e=i,n=this.cursor,u=this.limit_backward,o=0,h=0,c=!1;;){for(var a=s+(e-s>>1),f=0,l=o=0;m--){if(n-l==u){f=-1;break}if(f=r.charCodeAt(n-1-l)-_.s[m])break;l++}if(f<0?(e=a,h=l):(s=a,o=l),e-s<=1){if(s>0||e==s||c)break;c=!0}}for(;;){var _=t[s];if(o>=_.s_size){if(this.cursor=n-_.s_size,!_.method)return _.result;var b=_.method();if(this.cursor=n-_.s_size,b)return _.result}if((s=_.substring_i)<0)return 0}},replace_s:function(t,i,s){var e=s.length-(i-t),n=r.substring(0,t),u=r.substring(i);return r=n+s+u,this.limit+=e,this.cursor>=i?this.cursor+=e:this.cursor>t&&(this.cursor=t),e},slice_check:function(){if(this.bra<0||this.bra>this.ket||this.ket>this.limit||this.limit>r.length)throw"faulty slice operation"},slice_from:function(r){this.slice_check(),this.replace_s(this.bra,this.ket,r)},slice_del:function(){this.slice_from("")},insert:function(r,t,i){var s=this.replace_s(r,t,i);r<=this.bra&&(this.bra+=s),r<=this.ket&&(this.ket+=s)},slice_to:function(){return 
this.slice_check(),r.substring(this.bra,this.ket)},eq_v_b:function(r){return this.eq_s_b(r.length,r)}}}},r.trimmerSupport={generateTrimmer:function(r){var t=new RegExp("^[^"+r+"]+"),i=new RegExp("[^"+r+"]+$");return function(r){return"function"==typeof r.update?r.update(function(r){return r.replace(t,"").replace(i,"")}):r.replace(t,"").replace(i,"")}}}}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.sv.min.js b/assets/javascripts/lunr/min/lunr.sv.min.js deleted file mode 100644 index 3e5eb6400026..000000000000 --- a/assets/javascripts/lunr/min/lunr.sv.min.js +++ /dev/null @@ -1,18 +0,0 @@ -/*! - * Lunr languages, `Swedish` language - * https://github.com/MihaiValentin/lunr-languages - * - * Copyright 2014, Mihai Valentin - * http://www.mozilla.org/MPL/ - */ -/*! - * based on - * Snowball JavaScript Library v0.3 - * http://code.google.com/p/urim/ - * http://snowball.tartarus.org/ - * - * Copyright 2010, Oleg Mazko - * http://www.mozilla.org/MPL/ - */ - -!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.sv=function(){this.pipeline.reset(),this.pipeline.add(e.sv.trimmer,e.sv.stopWordFilter,e.sv.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.sv.stemmer))},e.sv.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.sv.trimmer=e.trimmerSupport.generateTrimmer(e.sv.wordCharacters),e.Pipeline.registerFunction(e.sv.trimmer,"trimmer-sv"),e.sv.stemmer=function(){var r=e.stemmerSupport.Among,n=e.stemmerSupport.SnowballProgram,t=new function(){function e(){var e,r=w.cursor+3;if(o=w.limit,0<=r||r<=w.limit){for(a=r;;){if(e=w.cursor,w.in_grouping(l,97,246)){w.cursor=e;break}if(w.cursor=e,w.cursor>=w.limit)return;w.cursor++}for(;!w.out_grouping(l,97,246);){if(w.cursor>=w.limit)return;w.cursor++}o=w.cursor,o=o&&(w.limit_backward=o,w.cursor=w.limit,w.ket=w.cursor,e=w.find_among_b(u,37),w.limit_backward=r,e))switch(w.bra=w.cursor,e){case 1:w.slice_del();break;case 2:w.in_grouping_b(d,98,121)&&w.slice_del()}}function i(){var e=w.limit_backward;w.cursor>=o&&(w.limit_backward=o,w.cursor=w.limit,w.find_among_b(c,7)&&(w.cursor=w.limit,w.ket=w.cursor,w.cursor>w.limit_backward&&(w.bra=--w.cursor,w.slice_del())),w.limit_backward=e)}function s(){var e,r;if(w.cursor>=o){if(r=w.limit_backward,w.limit_backward=o,w.cursor=w.limit,w.ket=w.cursor,e=w.find_among_b(m,5))switch(w.bra=w.cursor,e){case 1:w.slice_del();break;case 2:w.slice_from("lös");break;case 3:w.slice_from("full")}w.limit_backward=r}}var a,o,u=[new r("a",-1,1),new r("arna",0,1),new r("erna",0,1),new r("heterna",2,1),new r("orna",0,1),new r("ad",-1,1),new r("e",-1,1),new r("ade",6,1),new r("ande",6,1),new r("arne",6,1),new r("are",6,1),new r("aste",6,1),new r("en",-1,1),new r("anden",12,1),new r("aren",12,1),new r("heten",12,1),new r("ern",-1,1),new r("ar",-1,1),new r("er",-1,1),new r("heter",18,1),new r("or",-1,1),new r("s",-1,2),new r("as",21,1),new r("arnas",22,1),new 
r("ernas",22,1),new r("ornas",22,1),new r("es",21,1),new r("ades",26,1),new r("andes",26,1),new r("ens",21,1),new r("arens",29,1),new r("hetens",29,1),new r("erns",21,1),new r("at",-1,1),new r("andet",-1,1),new r("het",-1,1),new r("ast",-1,1)],c=[new r("dd",-1,-1),new r("gd",-1,-1),new r("nn",-1,-1),new r("dt",-1,-1),new r("gt",-1,-1),new r("kt",-1,-1),new r("tt",-1,-1)],m=[new r("ig",-1,1),new r("lig",0,1),new r("els",-1,1),new r("fullt",-1,3),new r("löst",-1,2)],l=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,24,0,32],d=[119,127,149],w=new n;this.setCurrent=function(e){w.setCurrent(e)},this.getCurrent=function(){return w.getCurrent()},this.stem=function(){var r=w.cursor;return e(),w.limit_backward=r,w.cursor=w.limit,t(),w.cursor=w.limit,i(),w.cursor=w.limit,s(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return t.setCurrent(e),t.stem(),t.getCurrent()}):(t.setCurrent(e),t.stem(),t.getCurrent())}}(),e.Pipeline.registerFunction(e.sv.stemmer,"stemmer-sv"),e.sv.stopWordFilter=e.generateStopWordFilter("alla allt att av blev bli blir blivit de dem den denna deras dess dessa det detta dig din dina ditt du där då efter ej eller en er era ert ett från för ha hade han hans har henne hennes hon honom hur här i icke ingen inom inte jag ju kan kunde man med mellan men mig min mina mitt mot mycket ni nu när någon något några och om oss på samma sedan sig sin sina sitta själv skulle som så sådan sådana sådant till under upp ut utan vad var vara varför varit varje vars vart vem vi vid vilka vilkas vilken vilket vår våra vårt än är åt över".split(" ")),e.Pipeline.registerFunction(e.sv.stopWordFilter,"stopWordFilter-sv")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.th.min.js b/assets/javascripts/lunr/min/lunr.th.min.js deleted file mode 100644 index dee3aac6e5cb..000000000000 --- a/assets/javascripts/lunr/min/lunr.th.min.js +++ /dev/null @@ -1 +0,0 @@ -!function(e,r){"function"==typeof 
define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");var r="2"==e.version[0];e.th=function(){this.pipeline.reset(),this.pipeline.add(e.th.trimmer),r?this.tokenizer=e.th.tokenizer:(e.tokenizer&&(e.tokenizer=e.th.tokenizer),this.tokenizerFn&&(this.tokenizerFn=e.th.tokenizer))},e.th.wordCharacters="[฀-๿]",e.th.trimmer=e.trimmerSupport.generateTrimmer(e.th.wordCharacters),e.Pipeline.registerFunction(e.th.trimmer,"trimmer-th");var t=e.wordcut;t.init(),e.th.tokenizer=function(i){if(!arguments.length||null==i||void 0==i)return[];if(Array.isArray(i))return i.map(function(t){return r?new e.Token(t):t});var n=i.toString().replace(/^\s+/,"");return t.cut(n).split("|")}}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.tr.min.js b/assets/javascripts/lunr/min/lunr.tr.min.js deleted file mode 100644 index 563f6ec1f525..000000000000 --- a/assets/javascripts/lunr/min/lunr.tr.min.js +++ /dev/null @@ -1,18 +0,0 @@ -/*! - * Lunr languages, `Turkish` language - * https://github.com/MihaiValentin/lunr-languages - * - * Copyright 2014, Mihai Valentin - * http://www.mozilla.org/MPL/ - */ -/*! - * based on - * Snowball JavaScript Library v0.3 - * http://code.google.com/p/urim/ - * http://snowball.tartarus.org/ - * - * Copyright 2010, Oleg Mazko - * http://www.mozilla.org/MPL/ - */ - -!function(r,i){"function"==typeof define&&define.amd?define(i):"object"==typeof exports?module.exports=i():i()(r.lunr)}(this,function(){return function(r){if(void 0===r)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===r.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");r.tr=function(){this.pipeline.reset(),this.pipeline.add(r.tr.trimmer,r.tr.stopWordFilter,r.tr.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(r.tr.stemmer))},r.tr.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",r.tr.trimmer=r.trimmerSupport.generateTrimmer(r.tr.wordCharacters),r.Pipeline.registerFunction(r.tr.trimmer,"trimmer-tr"),r.tr.stemmer=function(){var i=r.stemmerSupport.Among,e=r.stemmerSupport.SnowballProgram,n=new function(){function r(r,i,e){for(;;){var n=Dr.limit-Dr.cursor;if(Dr.in_grouping_b(r,i,e)){Dr.cursor=Dr.limit-n;break}if(Dr.cursor=Dr.limit-n,Dr.cursor<=Dr.limit_backward)return!1;Dr.cursor--}return!0}function n(){var i,e;i=Dr.limit-Dr.cursor,r(Wr,97,305);for(var n=0;nDr.limit_backward&&(Dr.cursor--,e=Dr.limit-Dr.cursor,i()))?(Dr.cursor=Dr.limit-e,!0):(Dr.cursor=Dr.limit-n,r()?(Dr.cursor=Dr.limit-n,!1):(Dr.cursor=Dr.limit-n,!(Dr.cursor<=Dr.limit_backward)&&(Dr.cursor--,!!i()&&(Dr.cursor=Dr.limit-n,!0))))}function u(r){return t(r,function(){return Dr.in_grouping_b(Wr,97,305)})}function o(){return u(function(){return Dr.eq_s_b(1,"n")})}function s(){return u(function(){return Dr.eq_s_b(1,"s")})}function c(){return u(function(){return Dr.eq_s_b(1,"y")})}function l(){return t(function(){return Dr.in_grouping_b(Lr,105,305)},function(){return Dr.out_grouping_b(Wr,97,305)})}function a(){return Dr.find_among_b(ur,10)&&l()}function m(){return n()&&Dr.in_grouping_b(Lr,105,305)&&s()}function d(){return Dr.find_among_b(or,2)}function f(){return n()&&Dr.in_grouping_b(Lr,105,305)&&c()}function b(){return n()&&Dr.find_among_b(sr,4)}function w(){return n()&&Dr.find_among_b(cr,4)&&o()}function _(){return n()&&Dr.find_among_b(lr,2)&&c()}function k(){return n()&&Dr.find_among_b(ar,2)}function p(){return n()&&Dr.find_among_b(mr,4)}function g(){return n()&&Dr.find_among_b(dr,2)}function y(){return 
n()&&Dr.find_among_b(fr,4)}function z(){return n()&&Dr.find_among_b(br,2)}function v(){return n()&&Dr.find_among_b(wr,2)&&c()}function h(){return Dr.eq_s_b(2,"ki")}function q(){return n()&&Dr.find_among_b(_r,2)&&o()}function C(){return n()&&Dr.find_among_b(kr,4)&&c()}function P(){return n()&&Dr.find_among_b(pr,4)}function F(){return n()&&Dr.find_among_b(gr,4)&&c()}function S(){return Dr.find_among_b(yr,4)}function W(){return n()&&Dr.find_among_b(zr,2)}function L(){return n()&&Dr.find_among_b(vr,4)}function x(){return n()&&Dr.find_among_b(hr,8)}function A(){return Dr.find_among_b(qr,2)}function E(){return n()&&Dr.find_among_b(Cr,32)&&c()}function j(){return Dr.find_among_b(Pr,8)&&c()}function T(){return n()&&Dr.find_among_b(Fr,4)&&c()}function Z(){return Dr.eq_s_b(3,"ken")&&c()}function B(){var r=Dr.limit-Dr.cursor;return!(T()||(Dr.cursor=Dr.limit-r,E()||(Dr.cursor=Dr.limit-r,j()||(Dr.cursor=Dr.limit-r,Z()))))}function D(){if(A()){var r=Dr.limit-Dr.cursor;if(S()||(Dr.cursor=Dr.limit-r,W()||(Dr.cursor=Dr.limit-r,C()||(Dr.cursor=Dr.limit-r,P()||(Dr.cursor=Dr.limit-r,F()||(Dr.cursor=Dr.limit-r))))),T())return!1}return!0}function G(){if(W()){Dr.bra=Dr.cursor,Dr.slice_del();var r=Dr.limit-Dr.cursor;return Dr.ket=Dr.cursor,x()||(Dr.cursor=Dr.limit-r,E()||(Dr.cursor=Dr.limit-r,j()||(Dr.cursor=Dr.limit-r,T()||(Dr.cursor=Dr.limit-r)))),nr=!1,!1}return!0}function H(){if(!L())return!0;var r=Dr.limit-Dr.cursor;return!E()&&(Dr.cursor=Dr.limit-r,!j())}function I(){var r,i=Dr.limit-Dr.cursor;return!(S()||(Dr.cursor=Dr.limit-i,F()||(Dr.cursor=Dr.limit-i,P()||(Dr.cursor=Dr.limit-i,C()))))||(Dr.bra=Dr.cursor,Dr.slice_del(),r=Dr.limit-Dr.cursor,Dr.ket=Dr.cursor,T()||(Dr.cursor=Dr.limit-r),!1)}function J(){var 
r,i=Dr.limit-Dr.cursor;if(Dr.ket=Dr.cursor,nr=!0,B()&&(Dr.cursor=Dr.limit-i,D()&&(Dr.cursor=Dr.limit-i,G()&&(Dr.cursor=Dr.limit-i,H()&&(Dr.cursor=Dr.limit-i,I()))))){if(Dr.cursor=Dr.limit-i,!x())return;Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,r=Dr.limit-Dr.cursor,S()||(Dr.cursor=Dr.limit-r,W()||(Dr.cursor=Dr.limit-r,C()||(Dr.cursor=Dr.limit-r,P()||(Dr.cursor=Dr.limit-r,F()||(Dr.cursor=Dr.limit-r))))),T()||(Dr.cursor=Dr.limit-r)}Dr.bra=Dr.cursor,Dr.slice_del()}function K(){var r,i,e,n;if(Dr.ket=Dr.cursor,h()){if(r=Dr.limit-Dr.cursor,p())return Dr.bra=Dr.cursor,Dr.slice_del(),i=Dr.limit-Dr.cursor,Dr.ket=Dr.cursor,W()?(Dr.bra=Dr.cursor,Dr.slice_del(),K()):(Dr.cursor=Dr.limit-i,a()&&(Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K()))),!0;if(Dr.cursor=Dr.limit-r,w()){if(Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,e=Dr.limit-Dr.cursor,d())Dr.bra=Dr.cursor,Dr.slice_del();else{if(Dr.cursor=Dr.limit-e,Dr.ket=Dr.cursor,!a()&&(Dr.cursor=Dr.limit-e,!m()&&(Dr.cursor=Dr.limit-e,!K())))return!0;Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K())}return!0}if(Dr.cursor=Dr.limit-r,g()){if(n=Dr.limit-Dr.cursor,d())Dr.bra=Dr.cursor,Dr.slice_del();else if(Dr.cursor=Dr.limit-n,m())Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K());else if(Dr.cursor=Dr.limit-n,!K())return!1;return!0}}return!1}function M(r){if(Dr.ket=Dr.cursor,!g()&&(Dr.cursor=Dr.limit-r,!k()))return!1;var i=Dr.limit-Dr.cursor;if(d())Dr.bra=Dr.cursor,Dr.slice_del();else if(Dr.cursor=Dr.limit-i,m())Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K());else if(Dr.cursor=Dr.limit-i,!K())return!1;return!0}function N(r){if(Dr.ket=Dr.cursor,!z()&&(Dr.cursor=Dr.limit-r,!b()))return!1;var i=Dr.limit-Dr.cursor;return!(!m()&&(Dr.cursor=Dr.limit-i,!d()))&&(Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K()),!0)}function 
O(){var r,i=Dr.limit-Dr.cursor;return Dr.ket=Dr.cursor,!(!w()&&(Dr.cursor=Dr.limit-i,!v()))&&(Dr.bra=Dr.cursor,Dr.slice_del(),r=Dr.limit-Dr.cursor,Dr.ket=Dr.cursor,!(!W()||(Dr.bra=Dr.cursor,Dr.slice_del(),!K()))||(Dr.cursor=Dr.limit-r,Dr.ket=Dr.cursor,!(a()||(Dr.cursor=Dr.limit-r,m()||(Dr.cursor=Dr.limit-r,K())))||(Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K()),!0)))}function Q(){var r,i,e=Dr.limit-Dr.cursor;if(Dr.ket=Dr.cursor,!p()&&(Dr.cursor=Dr.limit-e,!f()&&(Dr.cursor=Dr.limit-e,!_())))return!1;if(Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,r=Dr.limit-Dr.cursor,a())Dr.bra=Dr.cursor,Dr.slice_del(),i=Dr.limit-Dr.cursor,Dr.ket=Dr.cursor,W()||(Dr.cursor=Dr.limit-i);else if(Dr.cursor=Dr.limit-r,!W())return!0;return Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,K(),!0}function R(){var r,i,e=Dr.limit-Dr.cursor;if(Dr.ket=Dr.cursor,W())return Dr.bra=Dr.cursor,Dr.slice_del(),void K();if(Dr.cursor=Dr.limit-e,Dr.ket=Dr.cursor,q())if(Dr.bra=Dr.cursor,Dr.slice_del(),r=Dr.limit-Dr.cursor,Dr.ket=Dr.cursor,d())Dr.bra=Dr.cursor,Dr.slice_del();else{if(Dr.cursor=Dr.limit-r,Dr.ket=Dr.cursor,!a()&&(Dr.cursor=Dr.limit-r,!m())){if(Dr.cursor=Dr.limit-r,Dr.ket=Dr.cursor,!W())return;if(Dr.bra=Dr.cursor,Dr.slice_del(),!K())return}Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K())}else if(Dr.cursor=Dr.limit-e,!M(e)&&(Dr.cursor=Dr.limit-e,!N(e))){if(Dr.cursor=Dr.limit-e,Dr.ket=Dr.cursor,y())return Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,i=Dr.limit-Dr.cursor,void(a()?(Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K())):(Dr.cursor=Dr.limit-i,W()?(Dr.bra=Dr.cursor,Dr.slice_del(),K()):(Dr.cursor=Dr.limit-i,K())));if(Dr.cursor=Dr.limit-e,!O()){if(Dr.cursor=Dr.limit-e,d())return Dr.bra=Dr.cursor,void 
Dr.slice_del();Dr.cursor=Dr.limit-e,K()||(Dr.cursor=Dr.limit-e,Q()||(Dr.cursor=Dr.limit-e,Dr.ket=Dr.cursor,(a()||(Dr.cursor=Dr.limit-e,m()))&&(Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K()))))}}}function U(){var r;if(Dr.ket=Dr.cursor,r=Dr.find_among_b(Sr,4))switch(Dr.bra=Dr.cursor,r){case 1:Dr.slice_from("p");break;case 2:Dr.slice_from("ç");break;case 3:Dr.slice_from("t");break;case 4:Dr.slice_from("k")}}function V(){for(;;){var r=Dr.limit-Dr.cursor;if(Dr.in_grouping_b(Wr,97,305)){Dr.cursor=Dr.limit-r;break}if(Dr.cursor=Dr.limit-r,Dr.cursor<=Dr.limit_backward)return!1;Dr.cursor--}return!0}function X(r,i,e){if(Dr.cursor=Dr.limit-r,V()){var n=Dr.limit-Dr.cursor;if(!Dr.eq_s_b(1,i)&&(Dr.cursor=Dr.limit-n,!Dr.eq_s_b(1,e)))return!0;Dr.cursor=Dr.limit-r;var t=Dr.cursor;return Dr.insert(Dr.cursor,Dr.cursor,e),Dr.cursor=t,!1}return!0}function Y(){var r=Dr.limit-Dr.cursor;(Dr.eq_s_b(1,"d")||(Dr.cursor=Dr.limit-r,Dr.eq_s_b(1,"g")))&&X(r,"a","ı")&&X(r,"e","i")&&X(r,"o","u")&&X(r,"ö","ü")}function $(){for(var r,i=Dr.cursor,e=2;;){for(r=Dr.cursor;!Dr.in_grouping(Wr,97,305);){if(Dr.cursor>=Dr.limit)return Dr.cursor=r,!(e>0)&&(Dr.cursor=i,!0);Dr.cursor++}e--}}function rr(r,i,e){for(;!Dr.eq_s(i,e);){if(Dr.cursor>=Dr.limit)return!0;Dr.cursor++}return(tr=i)!=Dr.limit||(Dr.cursor=r,!1)}function ir(){var r=Dr.cursor;return!rr(r,2,"ad")||(Dr.cursor=r,!rr(r,5,"soyad"))}function er(){var r=Dr.cursor;return!ir()&&(Dr.limit_backward=r,Dr.cursor=Dr.limit,Y(),Dr.cursor=Dr.limit,U(),!0)}var nr,tr,ur=[new i("m",-1,-1),new i("n",-1,-1),new i("miz",-1,-1),new i("niz",-1,-1),new i("muz",-1,-1),new i("nuz",-1,-1),new i("müz",-1,-1),new i("nüz",-1,-1),new i("mız",-1,-1),new i("nız",-1,-1)],or=[new i("leri",-1,-1),new i("ları",-1,-1)],sr=[new i("ni",-1,-1),new i("nu",-1,-1),new i("nü",-1,-1),new i("nı",-1,-1)],cr=[new i("in",-1,-1),new i("un",-1,-1),new i("ün",-1,-1),new i("ın",-1,-1)],lr=[new i("a",-1,-1),new i("e",-1,-1)],ar=[new i("na",-1,-1),new 
i("ne",-1,-1)],mr=[new i("da",-1,-1),new i("ta",-1,-1),new i("de",-1,-1),new i("te",-1,-1)],dr=[new i("nda",-1,-1),new i("nde",-1,-1)],fr=[new i("dan",-1,-1),new i("tan",-1,-1),new i("den",-1,-1),new i("ten",-1,-1)],br=[new i("ndan",-1,-1),new i("nden",-1,-1)],wr=[new i("la",-1,-1),new i("le",-1,-1)],_r=[new i("ca",-1,-1),new i("ce",-1,-1)],kr=[new i("im",-1,-1),new i("um",-1,-1),new i("üm",-1,-1),new i("ım",-1,-1)],pr=[new i("sin",-1,-1),new i("sun",-1,-1),new i("sün",-1,-1),new i("sın",-1,-1)],gr=[new i("iz",-1,-1),new i("uz",-1,-1),new i("üz",-1,-1),new i("ız",-1,-1)],yr=[new i("siniz",-1,-1),new i("sunuz",-1,-1),new i("sünüz",-1,-1),new i("sınız",-1,-1)],zr=[new i("lar",-1,-1),new i("ler",-1,-1)],vr=[new i("niz",-1,-1),new i("nuz",-1,-1),new i("nüz",-1,-1),new i("nız",-1,-1)],hr=[new i("dir",-1,-1),new i("tir",-1,-1),new i("dur",-1,-1),new i("tur",-1,-1),new i("dür",-1,-1),new i("tür",-1,-1),new i("dır",-1,-1),new i("tır",-1,-1)],qr=[new i("casına",-1,-1),new i("cesine",-1,-1)],Cr=[new i("di",-1,-1),new i("ti",-1,-1),new i("dik",-1,-1),new i("tik",-1,-1),new i("duk",-1,-1),new i("tuk",-1,-1),new i("dük",-1,-1),new i("tük",-1,-1),new i("dık",-1,-1),new i("tık",-1,-1),new i("dim",-1,-1),new i("tim",-1,-1),new i("dum",-1,-1),new i("tum",-1,-1),new i("düm",-1,-1),new i("tüm",-1,-1),new i("dım",-1,-1),new i("tım",-1,-1),new i("din",-1,-1),new i("tin",-1,-1),new i("dun",-1,-1),new i("tun",-1,-1),new i("dün",-1,-1),new i("tün",-1,-1),new i("dın",-1,-1),new i("tın",-1,-1),new i("du",-1,-1),new i("tu",-1,-1),new i("dü",-1,-1),new i("tü",-1,-1),new i("dı",-1,-1),new i("tı",-1,-1)],Pr=[new i("sa",-1,-1),new i("se",-1,-1),new i("sak",-1,-1),new i("sek",-1,-1),new i("sam",-1,-1),new i("sem",-1,-1),new i("san",-1,-1),new i("sen",-1,-1)],Fr=[new i("miş",-1,-1),new i("muş",-1,-1),new i("müş",-1,-1),new i("mış",-1,-1)],Sr=[new i("b",-1,1),new i("c",-1,2),new i("d",-1,3),new 
i("ğ",-1,4)],Wr=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,32,8,0,0,0,0,0,0,1],Lr=[1,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,0,0,0,0,0,0,1],xr=[1,64,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],Ar=[17,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,130],Er=[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],jr=[17],Tr=[65],Zr=[65],Br=[["a",xr,97,305],["e",Ar,101,252],["ı",Er,97,305],["i",jr,101,105],["o",Tr,111,117],["ö",Zr,246,252],["u",Tr,111,117]],Dr=new e;this.setCurrent=function(r){Dr.setCurrent(r)},this.getCurrent=function(){return Dr.getCurrent()},this.stem=function(){return!!($()&&(Dr.limit_backward=Dr.cursor,Dr.cursor=Dr.limit,J(),Dr.cursor=Dr.limit,nr&&(R(),Dr.cursor=Dr.limit_backward,er())))}};return function(r){return"function"==typeof r.update?r.update(function(r){return n.setCurrent(r),n.stem(),n.getCurrent()}):(n.setCurrent(r),n.stem(),n.getCurrent())}}(),r.Pipeline.registerFunction(r.tr.stemmer,"stemmer-tr"),r.tr.stopWordFilter=r.generateStopWordFilter("acaba altmış altı ama ancak arada aslında ayrıca bana bazı belki ben benden beni benim beri beş bile bin bir biri birkaç birkez birçok birşey birşeyi biz bizden bize bizi bizim bu buna bunda bundan bunlar bunları bunların bunu bunun burada böyle böylece da daha dahi de defa değil diye diğer doksan dokuz dolayı dolayısıyla dört edecek eden ederek edilecek ediliyor edilmesi ediyor elli en etmesi etti ettiği ettiğini eğer gibi göre halen hangi hatta hem henüz hep hepsi her herhangi herkesin hiç hiçbir iki ile ilgili ise itibaren itibariyle için işte kadar karşın katrilyon kendi kendilerine kendini kendisi kendisine kendisini kez ki kim kimden kime kimi kimse kırk milyar milyon mu mü mı nasıl ne neden nedenle nerde nerede nereye niye niçin o olan olarak oldu olduklarını olduğu olduğunu olmadı olmadığı olmak olması olmayan olmaz olsa olsun olup olur olursa oluyor on ona ondan onlar onlardan onları onların onu onun otuz oysa pek rağmen sadece sanki sekiz seksen sen senden seni senin siz sizden sizi sizin tarafından 
trilyon tüm var vardı ve veya ya yani yapacak yapmak yaptı yaptıkları yaptığı yaptığını yapılan yapılması yapıyor yedi yerine yetmiş yine yirmi yoksa yüz zaten çok çünkü öyle üzere üç şey şeyden şeyi şeyler şu şuna şunda şundan şunları şunu şöyle".split(" ")),r.Pipeline.registerFunction(r.tr.stopWordFilter,"stopWordFilter-tr")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.vi.min.js b/assets/javascripts/lunr/min/lunr.vi.min.js deleted file mode 100644 index 22aed28c49b8..000000000000 --- a/assets/javascripts/lunr/min/lunr.vi.min.js +++ /dev/null @@ -1 +0,0 @@ -!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.vi=function(){this.pipeline.reset(),this.pipeline.add(e.vi.stopWordFilter,e.vi.trimmer)},e.vi.wordCharacters="[A-Za-ẓ̀͐́͑̉̃̓ÂâÊêÔôĂ-ăĐ-đƠ-ơƯ-ư]",e.vi.trimmer=e.trimmerSupport.generateTrimmer(e.vi.wordCharacters),e.Pipeline.registerFunction(e.vi.trimmer,"trimmer-vi"),e.vi.stopWordFilter=e.generateStopWordFilter("là cái nhưng mà".split(" "))}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.zh.min.js b/assets/javascripts/lunr/min/lunr.zh.min.js deleted file mode 100644 index 7727bbe24d71..000000000000 --- a/assets/javascripts/lunr/min/lunr.zh.min.js +++ /dev/null @@ -1 +0,0 @@ -!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r(require("nodejieba")):r()(e.lunr)}(this,function(e){return function(r,t){if(void 0===r)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===r.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");var i="2"==r.version[0];r.zh=function(){this.pipeline.reset(),this.pipeline.add(r.zh.trimmer,r.zh.stopWordFilter,r.zh.stemmer),i?this.tokenizer=r.zh.tokenizer:(r.tokenizer&&(r.tokenizer=r.zh.tokenizer),this.tokenizerFn&&(this.tokenizerFn=r.zh.tokenizer))},r.zh.tokenizer=function(n){if(!arguments.length||null==n||void 0==n)return[];if(Array.isArray(n))return n.map(function(e){return i?new r.Token(e.toLowerCase()):e.toLowerCase()});t&&e.load(t);var o=n.toString().trim().toLowerCase(),s=[];e.cut(o,!0).forEach(function(e){s=s.concat(e.split(" "))}),s=s.filter(function(e){return!!e});var u=0;return s.map(function(e,t){if(i){var n=o.indexOf(e,u),s={};return s.position=[n,e.length],s.index=t,u=n,new r.Token(e,s)}return e})},r.zh.wordCharacters="\\w一-龥",r.zh.trimmer=r.trimmerSupport.generateTrimmer(r.zh.wordCharacters),r.Pipeline.registerFunction(r.zh.trimmer,"trimmer-zh"),r.zh.stemmer=function(){return function(e){return e}}(),r.Pipeline.registerFunction(r.zh.stemmer,"stemmer-zh"),r.zh.stopWordFilter=r.generateStopWordFilter("的 一 不 在 人 有 是 为 以 于 上 他 而 后 之 来 及 了 因 下 可 到 由 这 与 也 此 但 并 个 其 已 无 小 我 们 起 最 再 今 去 好 只 又 或 很 亦 某 把 那 你 乃 它 吧 被 比 别 趁 当 从 到 得 打 凡 儿 尔 该 各 给 跟 和 何 还 即 几 既 看 据 距 靠 啦 了 另 么 每 们 嘛 拿 哪 那 您 凭 且 却 让 仍 啥 如 若 使 谁 虽 随 同 所 她 哇 嗡 往 哪 些 向 沿 哟 用 于 咱 则 怎 曾 至 致 着 诸 自".split(" ")),r.Pipeline.registerFunction(r.zh.stopWordFilter,"stopWordFilter-zh")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/tinyseg.js b/assets/javascripts/lunr/tinyseg.js deleted file mode 100644 index 167fa6dd69e0..000000000000 --- a/assets/javascripts/lunr/tinyseg.js +++ /dev/null @@ -1,206 +0,0 @@ -/** - * export the module via AMD, CommonJS or as a browser global - * Export code from https://github.com/umdjs/umd/blob/master/returnExports.js - */ -;(function (root, factory) { - if (typeof define === 'function' && define.amd) { - // AMD. Register as an anonymous module. 
- define(factory) - } else if (typeof exports === 'object') { - /** - * Node. Does not work with strict CommonJS, but - * only CommonJS-like environments that support module.exports, - * like Node. - */ - module.exports = factory() - } else { - // Browser globals (root is window) - factory()(root.lunr); - } -}(this, function () { - /** - * Just return a value to define the module export. - * This example returns an object, but the module - * can return a function as the exported value. - */ - - return function(lunr) { - // TinySegmenter 0.1 -- Super compact Japanese tokenizer in Javascript - // (c) 2008 Taku Kudo - // TinySegmenter is freely distributable under the terms of a new BSD licence. - // For details, see http://chasen.org/~taku/software/TinySegmenter/LICENCE.txt - - function TinySegmenter() { - var patterns = { - "[一二三四五六七八九十百千万億兆]":"M", - "[一-龠々〆ヵヶ]":"H", - "[ぁ-ん]":"I", - "[ァ-ヴーア-ン゙ー]":"K", - "[a-zA-Za-zA-Z]":"A", - "[0-90-9]":"N" - } - this.chartype_ = []; - for (var i in patterns) { - var regexp = new RegExp(i); - this.chartype_.push([regexp, patterns[i]]); - } - - this.BIAS__ = -332 - this.BC1__ = {"HH":6,"II":2461,"KH":406,"OH":-1378}; - this.BC2__ = {"AA":-3267,"AI":2744,"AN":-878,"HH":-4070,"HM":-1711,"HN":4012,"HO":3761,"IA":1327,"IH":-1184,"II":-1332,"IK":1721,"IO":5492,"KI":3831,"KK":-8741,"MH":-3132,"MK":3334,"OO":-2920}; - this.BC3__ = {"HH":996,"HI":626,"HK":-721,"HN":-1307,"HO":-836,"IH":-301,"KK":2762,"MK":1079,"MM":4034,"OA":-1652,"OH":266}; - this.BP1__ = {"BB":295,"OB":304,"OO":-125,"UB":352}; - this.BP2__ = {"BO":60,"OO":-1762}; - this.BQ1__ = {"BHH":1150,"BHM":1521,"BII":-1158,"BIM":886,"BMH":1208,"BNH":449,"BOH":-91,"BOO":-2597,"OHI":451,"OIH":-296,"OKA":1851,"OKH":-1020,"OKK":904,"OOO":2965}; - this.BQ2__ = {"BHH":118,"BHI":-1159,"BHM":466,"BIH":-919,"BKK":-1720,"BKO":864,"OHH":-1139,"OHM":-181,"OIH":153,"UHI":-1146}; - this.BQ3__ = 
{"BHH":-792,"BHI":2664,"BII":-299,"BKI":419,"BMH":937,"BMM":8335,"BNN":998,"BOH":775,"OHH":2174,"OHM":439,"OII":280,"OKH":1798,"OKI":-793,"OKO":-2242,"OMH":-2402,"OOO":11699}; - this.BQ4__ = {"BHH":-3895,"BIH":3761,"BII":-4654,"BIK":1348,"BKK":-1806,"BMI":-3385,"BOO":-12396,"OAH":926,"OHH":266,"OHK":-2036,"ONN":-973}; - this.BW1__ = {",と":660,",同":727,"B1あ":1404,"B1同":542,"、と":660,"、同":727,"」と":1682,"あっ":1505,"いう":1743,"いっ":-2055,"いる":672,"うし":-4817,"うん":665,"から":3472,"がら":600,"こう":-790,"こと":2083,"こん":-1262,"さら":-4143,"さん":4573,"した":2641,"して":1104,"すで":-3399,"そこ":1977,"それ":-871,"たち":1122,"ため":601,"った":3463,"つい":-802,"てい":805,"てき":1249,"でき":1127,"です":3445,"では":844,"とい":-4915,"とみ":1922,"どこ":3887,"ない":5713,"なっ":3015,"など":7379,"なん":-1113,"にし":2468,"には":1498,"にも":1671,"に対":-912,"の一":-501,"の中":741,"ませ":2448,"まで":1711,"まま":2600,"まる":-2155,"やむ":-1947,"よっ":-2565,"れた":2369,"れで":-913,"をし":1860,"を見":731,"亡く":-1886,"京都":2558,"取り":-2784,"大き":-2604,"大阪":1497,"平方":-2314,"引き":-1336,"日本":-195,"本当":-2423,"毎日":-2113,"目指":-724,"B1あ":1404,"B1同":542,"」と":1682}; - this.BW2__ = 
{"..":-11822,"11":-669,"――":-5730,"−−":-13175,"いう":-1609,"うか":2490,"かし":-1350,"かも":-602,"から":-7194,"かれ":4612,"がい":853,"がら":-3198,"きた":1941,"くな":-1597,"こと":-8392,"この":-4193,"させ":4533,"され":13168,"さん":-3977,"しい":-1819,"しか":-545,"した":5078,"して":972,"しな":939,"その":-3744,"たい":-1253,"たた":-662,"ただ":-3857,"たち":-786,"たと":1224,"たは":-939,"った":4589,"って":1647,"っと":-2094,"てい":6144,"てき":3640,"てく":2551,"ては":-3110,"ても":-3065,"でい":2666,"でき":-1528,"でし":-3828,"です":-4761,"でも":-4203,"とい":1890,"とこ":-1746,"とと":-2279,"との":720,"とみ":5168,"とも":-3941,"ない":-2488,"なが":-1313,"など":-6509,"なの":2614,"なん":3099,"にお":-1615,"にし":2748,"にな":2454,"によ":-7236,"に対":-14943,"に従":-4688,"に関":-11388,"のか":2093,"ので":-7059,"のに":-6041,"のの":-6125,"はい":1073,"はが":-1033,"はず":-2532,"ばれ":1813,"まし":-1316,"まで":-6621,"まれ":5409,"めて":-3153,"もい":2230,"もの":-10713,"らか":-944,"らし":-1611,"らに":-1897,"りし":651,"りま":1620,"れた":4270,"れて":849,"れば":4114,"ろう":6067,"われ":7901,"を通":-11877,"んだ":728,"んな":-4115,"一人":602,"一方":-1375,"一日":970,"一部":-1051,"上が":-4479,"会社":-1116,"出て":2163,"分の":-7758,"同党":970,"同日":-913,"大阪":-2471,"委員":-1250,"少な":-1050,"年度":-8669,"年間":-1626,"府県":-2363,"手権":-1982,"新聞":-4066,"日新":-722,"日本":-7068,"日米":3372,"曜日":-601,"朝鮮":-2355,"本人":-2697,"東京":-1543,"然と":-1384,"社会":-1276,"立て":-990,"第に":-1612,"米国":-4268,"11":-669}; - this.BW3__ = 
{"あた":-2194,"あり":719,"ある":3846,"い.":-1185,"い。":-1185,"いい":5308,"いえ":2079,"いく":3029,"いた":2056,"いっ":1883,"いる":5600,"いわ":1527,"うち":1117,"うと":4798,"えと":1454,"か.":2857,"か。":2857,"かけ":-743,"かっ":-4098,"かに":-669,"から":6520,"かり":-2670,"が,":1816,"が、":1816,"がき":-4855,"がけ":-1127,"がっ":-913,"がら":-4977,"がり":-2064,"きた":1645,"けど":1374,"こと":7397,"この":1542,"ころ":-2757,"さい":-714,"さを":976,"し,":1557,"し、":1557,"しい":-3714,"した":3562,"して":1449,"しな":2608,"しま":1200,"す.":-1310,"す。":-1310,"する":6521,"ず,":3426,"ず、":3426,"ずに":841,"そう":428,"た.":8875,"た。":8875,"たい":-594,"たの":812,"たり":-1183,"たる":-853,"だ.":4098,"だ。":4098,"だっ":1004,"った":-4748,"って":300,"てい":6240,"てお":855,"ても":302,"です":1437,"でに":-1482,"では":2295,"とう":-1387,"とし":2266,"との":541,"とも":-3543,"どう":4664,"ない":1796,"なく":-903,"など":2135,"に,":-1021,"に、":-1021,"にし":1771,"にな":1906,"には":2644,"の,":-724,"の、":-724,"の子":-1000,"は,":1337,"は、":1337,"べき":2181,"まし":1113,"ます":6943,"まっ":-1549,"まで":6154,"まれ":-793,"らし":1479,"られ":6820,"るる":3818,"れ,":854,"れ、":854,"れた":1850,"れて":1375,"れば":-3246,"れる":1091,"われ":-605,"んだ":606,"んで":798,"カ月":990,"会議":860,"入り":1232,"大会":2217,"始め":1681,"市":965,"新聞":-5055,"日,":974,"日、":974,"社会":2024,"カ月":990}; - this.TC1__ = {"AAA":1093,"HHH":1029,"HHM":580,"HII":998,"HOH":-390,"HOM":-331,"IHI":1169,"IOH":-142,"IOI":-1015,"IOM":467,"MMH":187,"OOI":-1832}; - this.TC2__ = {"HHO":2088,"HII":-1023,"HMM":-1154,"IHI":-1965,"KKH":703,"OII":-2649}; - this.TC3__ = {"AAA":-294,"HHH":346,"HHI":-341,"HII":-1088,"HIK":731,"HOH":-1486,"IHH":128,"IHI":-3041,"IHO":-1935,"IIH":-825,"IIM":-1035,"IOI":-542,"KHH":-1216,"KKA":491,"KKH":-1217,"KOK":-1009,"MHH":-2694,"MHM":-457,"MHO":123,"MMH":-471,"NNH":-1689,"NNO":662,"OHO":-3393}; - this.TC4__ = {"HHH":-203,"HHI":1344,"HHK":365,"HHM":-122,"HHN":182,"HHO":669,"HIH":804,"HII":679,"HOH":446,"IHH":695,"IHO":-2324,"IIH":321,"III":1497,"IIO":656,"IOO":54,"KAK":4845,"KKA":3386,"KKK":3065,"MHH":-405,"MHI":201,"MMH":-241,"MMM":661,"MOM":841}; - this.TQ1__ = 
{"BHHH":-227,"BHHI":316,"BHIH":-132,"BIHH":60,"BIII":1595,"BNHH":-744,"BOHH":225,"BOOO":-908,"OAKK":482,"OHHH":281,"OHIH":249,"OIHI":200,"OIIH":-68}; - this.TQ2__ = {"BIHH":-1401,"BIII":-1033,"BKAK":-543,"BOOO":-5591}; - this.TQ3__ = {"BHHH":478,"BHHM":-1073,"BHIH":222,"BHII":-504,"BIIH":-116,"BIII":-105,"BMHI":-863,"BMHM":-464,"BOMH":620,"OHHH":346,"OHHI":1729,"OHII":997,"OHMH":481,"OIHH":623,"OIIH":1344,"OKAK":2792,"OKHH":587,"OKKA":679,"OOHH":110,"OOII":-685}; - this.TQ4__ = {"BHHH":-721,"BHHM":-3604,"BHII":-966,"BIIH":-607,"BIII":-2181,"OAAA":-2763,"OAKK":180,"OHHH":-294,"OHHI":2446,"OHHO":480,"OHIH":-1573,"OIHH":1935,"OIHI":-493,"OIIH":626,"OIII":-4007,"OKAK":-8156}; - this.TW1__ = {"につい":-4681,"東京都":2026}; - this.TW2__ = {"ある程":-2049,"いった":-1256,"ころが":-2434,"しょう":3873,"その後":-4430,"だって":-1049,"ていた":1833,"として":-4657,"ともに":-4517,"もので":1882,"一気に":-792,"初めて":-1512,"同時に":-8097,"大きな":-1255,"対して":-2721,"社会党":-3216}; - this.TW3__ = {"いただ":-1734,"してい":1314,"として":-4314,"につい":-5483,"にとっ":-5989,"に当た":-6247,"ので,":-727,"ので、":-727,"のもの":-600,"れから":-3752,"十二月":-2287}; - this.TW4__ = {"いう.":8576,"いう。":8576,"からな":-2348,"してい":2958,"たが,":1516,"たが、":1516,"ている":1538,"という":1349,"ました":5543,"ません":1097,"ようと":-4258,"よると":5865}; - this.UC1__ = {"A":484,"K":93,"M":645,"O":-505}; - this.UC2__ = {"A":819,"H":1059,"I":409,"M":3987,"N":5775,"O":646}; - this.UC3__ = {"A":-1370,"I":2311}; - this.UC4__ = {"A":-2643,"H":1809,"I":-1032,"K":-3450,"M":3565,"N":3876,"O":6646}; - this.UC5__ = {"H":313,"I":-1238,"K":-799,"M":539,"O":-831}; - this.UC6__ = {"H":-506,"I":-253,"K":87,"M":247,"O":-387}; - this.UP1__ = {"O":-214}; - this.UP2__ = {"B":69,"O":935}; - this.UP3__ = {"B":189}; - this.UQ1__ = {"BH":21,"BI":-12,"BK":-99,"BN":142,"BO":-56,"OH":-95,"OI":477,"OK":410,"OO":-2422}; - this.UQ2__ = {"BH":216,"BI":113,"OK":1759}; - this.UQ3__ = {"BA":-479,"BH":42,"BI":1913,"BK":-7198,"BM":3160,"BN":6427,"BO":14761,"OI":-827,"ON":-3212}; - this.UW1__ = 
{",":156,"、":156,"「":-463,"あ":-941,"う":-127,"が":-553,"き":121,"こ":505,"で":-201,"と":-547,"ど":-123,"に":-789,"の":-185,"は":-847,"も":-466,"や":-470,"よ":182,"ら":-292,"り":208,"れ":169,"を":-446,"ん":-137,"・":-135,"主":-402,"京":-268,"区":-912,"午":871,"国":-460,"大":561,"委":729,"市":-411,"日":-141,"理":361,"生":-408,"県":-386,"都":-718,"「":-463,"・":-135}; - this.UW2__ = {",":-829,"、":-829,"〇":892,"「":-645,"」":3145,"あ":-538,"い":505,"う":134,"お":-502,"か":1454,"が":-856,"く":-412,"こ":1141,"さ":878,"ざ":540,"し":1529,"す":-675,"せ":300,"そ":-1011,"た":188,"だ":1837,"つ":-949,"て":-291,"で":-268,"と":-981,"ど":1273,"な":1063,"に":-1764,"の":130,"は":-409,"ひ":-1273,"べ":1261,"ま":600,"も":-1263,"や":-402,"よ":1639,"り":-579,"る":-694,"れ":571,"を":-2516,"ん":2095,"ア":-587,"カ":306,"キ":568,"ッ":831,"三":-758,"不":-2150,"世":-302,"中":-968,"主":-861,"事":492,"人":-123,"会":978,"保":362,"入":548,"初":-3025,"副":-1566,"北":-3414,"区":-422,"大":-1769,"天":-865,"太":-483,"子":-1519,"学":760,"実":1023,"小":-2009,"市":-813,"年":-1060,"強":1067,"手":-1519,"揺":-1033,"政":1522,"文":-1355,"新":-1682,"日":-1815,"明":-1462,"最":-630,"朝":-1843,"本":-1650,"東":-931,"果":-665,"次":-2378,"民":-180,"気":-1740,"理":752,"発":529,"目":-1584,"相":-242,"県":-1165,"立":-763,"第":810,"米":509,"自":-1353,"行":838,"西":-744,"見":-3874,"調":1010,"議":1198,"込":3041,"開":1758,"間":-1257,"「":-645,"」":3145,"ッ":831,"ア":-587,"カ":306,"キ":568}; - this.UW3__ = 
{",":4889,"1":-800,"−":-1723,"、":4889,"々":-2311,"〇":5827,"」":2670,"〓":-3573,"あ":-2696,"い":1006,"う":2342,"え":1983,"お":-4864,"か":-1163,"が":3271,"く":1004,"け":388,"げ":401,"こ":-3552,"ご":-3116,"さ":-1058,"し":-395,"す":584,"せ":3685,"そ":-5228,"た":842,"ち":-521,"っ":-1444,"つ":-1081,"て":6167,"で":2318,"と":1691,"ど":-899,"な":-2788,"に":2745,"の":4056,"は":4555,"ひ":-2171,"ふ":-1798,"へ":1199,"ほ":-5516,"ま":-4384,"み":-120,"め":1205,"も":2323,"や":-788,"よ":-202,"ら":727,"り":649,"る":5905,"れ":2773,"わ":-1207,"を":6620,"ん":-518,"ア":551,"グ":1319,"ス":874,"ッ":-1350,"ト":521,"ム":1109,"ル":1591,"ロ":2201,"ン":278,"・":-3794,"一":-1619,"下":-1759,"世":-2087,"両":3815,"中":653,"主":-758,"予":-1193,"二":974,"人":2742,"今":792,"他":1889,"以":-1368,"低":811,"何":4265,"作":-361,"保":-2439,"元":4858,"党":3593,"全":1574,"公":-3030,"六":755,"共":-1880,"円":5807,"再":3095,"分":457,"初":2475,"別":1129,"前":2286,"副":4437,"力":365,"動":-949,"務":-1872,"化":1327,"北":-1038,"区":4646,"千":-2309,"午":-783,"協":-1006,"口":483,"右":1233,"各":3588,"合":-241,"同":3906,"和":-837,"員":4513,"国":642,"型":1389,"場":1219,"外":-241,"妻":2016,"学":-1356,"安":-423,"実":-1008,"家":1078,"小":-513,"少":-3102,"州":1155,"市":3197,"平":-1804,"年":2416,"広":-1030,"府":1605,"度":1452,"建":-2352,"当":-3885,"得":1905,"思":-1291,"性":1822,"戸":-488,"指":-3973,"政":-2013,"教":-1479,"数":3222,"文":-1489,"新":1764,"日":2099,"旧":5792,"昨":-661,"時":-1248,"曜":-951,"最":-937,"月":4125,"期":360,"李":3094,"村":364,"東":-805,"核":5156,"森":2438,"業":484,"氏":2613,"民":-1694,"決":-1073,"法":1868,"海":-495,"無":979,"物":461,"特":-3850,"生":-273,"用":914,"町":1215,"的":7313,"直":-1835,"省":792,"県":6293,"知":-1528,"私":4231,"税":401,"立":-960,"第":1201,"米":7767,"系":3066,"約":3663,"級":1384,"統":-4229,"総":1163,"線":1255,"者":6457,"能":725,"自":-2869,"英":785,"見":1044,"調":-562,"財":-733,"費":1777,"車":1835,"軍":1375,"込":-1504,"通":-1136,"選":-681,"郎":1026,"郡":4404,"部":1200,"金":2163,"長":421,"開":-1432,"間":1302,"関":-1282,"雨":2009,"電":-1045,"非":2066,"駅":1620,"1":-800,"」":2670,"・":-3794,"ッ":-1350,"ア":551,"グ":1319,"ス":874,"ト":521,"ム":1109,"ル":1591,"ロ":2201,"ン":278}; - this.UW4__ = 
{",":3930,".":3508,"―":-4841,"、":3930,"。":3508,"〇":4999,"「":1895,"」":3798,"〓":-5156,"あ":4752,"い":-3435,"う":-640,"え":-2514,"お":2405,"か":530,"が":6006,"き":-4482,"ぎ":-3821,"く":-3788,"け":-4376,"げ":-4734,"こ":2255,"ご":1979,"さ":2864,"し":-843,"じ":-2506,"す":-731,"ず":1251,"せ":181,"そ":4091,"た":5034,"だ":5408,"ち":-3654,"っ":-5882,"つ":-1659,"て":3994,"で":7410,"と":4547,"な":5433,"に":6499,"ぬ":1853,"ね":1413,"の":7396,"は":8578,"ば":1940,"ひ":4249,"び":-4134,"ふ":1345,"へ":6665,"べ":-744,"ほ":1464,"ま":1051,"み":-2082,"む":-882,"め":-5046,"も":4169,"ゃ":-2666,"や":2795,"ょ":-1544,"よ":3351,"ら":-2922,"り":-9726,"る":-14896,"れ":-2613,"ろ":-4570,"わ":-1783,"を":13150,"ん":-2352,"カ":2145,"コ":1789,"セ":1287,"ッ":-724,"ト":-403,"メ":-1635,"ラ":-881,"リ":-541,"ル":-856,"ン":-3637,"・":-4371,"ー":-11870,"一":-2069,"中":2210,"予":782,"事":-190,"井":-1768,"人":1036,"以":544,"会":950,"体":-1286,"作":530,"側":4292,"先":601,"党":-2006,"共":-1212,"内":584,"円":788,"初":1347,"前":1623,"副":3879,"力":-302,"動":-740,"務":-2715,"化":776,"区":4517,"協":1013,"参":1555,"合":-1834,"和":-681,"員":-910,"器":-851,"回":1500,"国":-619,"園":-1200,"地":866,"場":-1410,"塁":-2094,"士":-1413,"多":1067,"大":571,"子":-4802,"学":-1397,"定":-1057,"寺":-809,"小":1910,"屋":-1328,"山":-1500,"島":-2056,"川":-2667,"市":2771,"年":374,"庁":-4556,"後":456,"性":553,"感":916,"所":-1566,"支":856,"改":787,"政":2182,"教":704,"文":522,"方":-856,"日":1798,"時":1829,"最":845,"月":-9066,"木":-485,"来":-442,"校":-360,"業":-1043,"氏":5388,"民":-2716,"気":-910,"沢":-939,"済":-543,"物":-735,"率":672,"球":-1267,"生":-1286,"産":-1101,"田":-2900,"町":1826,"的":2586,"目":922,"省":-3485,"県":2997,"空":-867,"立":-2112,"第":788,"米":2937,"系":786,"約":2171,"経":1146,"統":-1169,"総":940,"線":-994,"署":749,"者":2145,"能":-730,"般":-852,"行":-792,"規":792,"警":-1184,"議":-244,"谷":-1000,"賞":730,"車":-1481,"軍":1158,"輪":-1433,"込":-3370,"近":929,"道":-1291,"選":2596,"郎":-4866,"都":1192,"野":-1100,"銀":-2213,"長":357,"間":-2344,"院":-2297,"際":-2604,"電":-878,"領":-1659,"題":-792,"館":-1984,"首":1749,"高":2120,"「":1895,"」":3798,"・":-4371,"ッ":-724,"ー":-11870,"カ":2145,"コ":1789,"セ":1287,"ト":-403,"メ":-1635,"ラ":-8
81,"リ":-541,"ル":-856,"ン":-3637}; - this.UW5__ = {",":465,".":-299,"1":-514,"E2":-32768,"]":-2762,"、":465,"。":-299,"「":363,"あ":1655,"い":331,"う":-503,"え":1199,"お":527,"か":647,"が":-421,"き":1624,"ぎ":1971,"く":312,"げ":-983,"さ":-1537,"し":-1371,"す":-852,"だ":-1186,"ち":1093,"っ":52,"つ":921,"て":-18,"で":-850,"と":-127,"ど":1682,"な":-787,"に":-1224,"の":-635,"は":-578,"べ":1001,"み":502,"め":865,"ゃ":3350,"ょ":854,"り":-208,"る":429,"れ":504,"わ":419,"を":-1264,"ん":327,"イ":241,"ル":451,"ン":-343,"中":-871,"京":722,"会":-1153,"党":-654,"務":3519,"区":-901,"告":848,"員":2104,"大":-1296,"学":-548,"定":1785,"嵐":-1304,"市":-2991,"席":921,"年":1763,"思":872,"所":-814,"挙":1618,"新":-1682,"日":218,"月":-4353,"査":932,"格":1356,"機":-1508,"氏":-1347,"田":240,"町":-3912,"的":-3149,"相":1319,"省":-1052,"県":-4003,"研":-997,"社":-278,"空":-813,"統":1955,"者":-2233,"表":663,"語":-1073,"議":1219,"選":-1018,"郎":-368,"長":786,"間":1191,"題":2368,"館":-689,"1":-514,"E2":-32768,"「":363,"イ":241,"ル":451,"ン":-343}; - this.UW6__ = {",":227,".":808,"1":-270,"E1":306,"、":227,"。":808,"あ":-307,"う":189,"か":241,"が":-73,"く":-121,"こ":-200,"じ":1782,"す":383,"た":-428,"っ":573,"て":-1014,"で":101,"と":-105,"な":-253,"に":-149,"の":-417,"は":-236,"も":-206,"り":187,"る":-135,"を":195,"ル":-673,"ン":-496,"一":-277,"中":201,"件":-800,"会":624,"前":302,"区":1792,"員":-1212,"委":798,"学":-960,"市":887,"広":-695,"後":535,"業":-697,"相":753,"社":-507,"福":974,"空":-822,"者":1811,"連":463,"郎":1082,"1":-270,"E1":306,"ル":-673,"ン":-496}; - - return this; - } - TinySegmenter.prototype.ctype_ = function(str) { - for (var i in this.chartype_) { - if (str.match(this.chartype_[i][0])) { - return this.chartype_[i][1]; - } - } - return "O"; - } - - TinySegmenter.prototype.ts_ = function(v) { - if (v) { return v; } - return 0; - } - - TinySegmenter.prototype.segment = function(input) { - if (input == null || input == undefined || input == "") { - return []; - } - var result = []; - var seg = ["B3","B2","B1"]; - var ctype = ["O","O","O"]; - var o = input.split(""); - for (i = 0; i < o.length; ++i) { - seg.push(o[i]); - 
ctype.push(this.ctype_(o[i])) - } - seg.push("E1"); - seg.push("E2"); - seg.push("E3"); - ctype.push("O"); - ctype.push("O"); - ctype.push("O"); - var word = seg[3]; - var p1 = "U"; - var p2 = "U"; - var p3 = "U"; - for (var i = 4; i < seg.length - 3; ++i) { - var score = this.BIAS__; - var w1 = seg[i-3]; - var w2 = seg[i-2]; - var w3 = seg[i-1]; - var w4 = seg[i]; - var w5 = seg[i+1]; - var w6 = seg[i+2]; - var c1 = ctype[i-3]; - var c2 = ctype[i-2]; - var c3 = ctype[i-1]; - var c4 = ctype[i]; - var c5 = ctype[i+1]; - var c6 = ctype[i+2]; - score += this.ts_(this.UP1__[p1]); - score += this.ts_(this.UP2__[p2]); - score += this.ts_(this.UP3__[p3]); - score += this.ts_(this.BP1__[p1 + p2]); - score += this.ts_(this.BP2__[p2 + p3]); - score += this.ts_(this.UW1__[w1]); - score += this.ts_(this.UW2__[w2]); - score += this.ts_(this.UW3__[w3]); - score += this.ts_(this.UW4__[w4]); - score += this.ts_(this.UW5__[w5]); - score += this.ts_(this.UW6__[w6]); - score += this.ts_(this.BW1__[w2 + w3]); - score += this.ts_(this.BW2__[w3 + w4]); - score += this.ts_(this.BW3__[w4 + w5]); - score += this.ts_(this.TW1__[w1 + w2 + w3]); - score += this.ts_(this.TW2__[w2 + w3 + w4]); - score += this.ts_(this.TW3__[w3 + w4 + w5]); - score += this.ts_(this.TW4__[w4 + w5 + w6]); - score += this.ts_(this.UC1__[c1]); - score += this.ts_(this.UC2__[c2]); - score += this.ts_(this.UC3__[c3]); - score += this.ts_(this.UC4__[c4]); - score += this.ts_(this.UC5__[c5]); - score += this.ts_(this.UC6__[c6]); - score += this.ts_(this.BC1__[c2 + c3]); - score += this.ts_(this.BC2__[c3 + c4]); - score += this.ts_(this.BC3__[c4 + c5]); - score += this.ts_(this.TC1__[c1 + c2 + c3]); - score += this.ts_(this.TC2__[c2 + c3 + c4]); - score += this.ts_(this.TC3__[c3 + c4 + c5]); - score += this.ts_(this.TC4__[c4 + c5 + c6]); - // score += this.ts_(this.TC5__[c4 + c5 + c6]); - score += this.ts_(this.UQ1__[p1 + c1]); - score += this.ts_(this.UQ2__[p2 + c2]); - score += this.ts_(this.UQ3__[p3 + c3]); - score += 
this.ts_(this.BQ1__[p2 + c2 + c3]); - score += this.ts_(this.BQ2__[p2 + c3 + c4]); - score += this.ts_(this.BQ3__[p3 + c2 + c3]); - score += this.ts_(this.BQ4__[p3 + c3 + c4]); - score += this.ts_(this.TQ1__[p2 + c1 + c2 + c3]); - score += this.ts_(this.TQ2__[p2 + c2 + c3 + c4]); - score += this.ts_(this.TQ3__[p3 + c1 + c2 + c3]); - score += this.ts_(this.TQ4__[p3 + c2 + c3 + c4]); - var p = "O"; - if (score > 0) { - result.push(word); - word = ""; - p = "B"; - } - p1 = p2; - p2 = p3; - p3 = p; - word += seg[i]; - } - result.push(word); - - return result; - } - - lunr.TinySegmenter = TinySegmenter; - }; - -})); \ No newline at end of file diff --git a/assets/javascripts/lunr/wordcut.js b/assets/javascripts/lunr/wordcut.js deleted file mode 100644 index 146f4b44bca1..000000000000 --- a/assets/javascripts/lunr/wordcut.js +++ /dev/null @@ -1,6708 +0,0 @@ -(function(f){if(typeof exports==="object"&&typeof module!=="undefined"){module.exports=f()}else if(typeof define==="function"&&define.amd){define([],f)}else{var g;if(typeof window!=="undefined"){g=window}else if(typeof global!=="undefined"){g=global}else if(typeof self!=="undefined"){g=self}else{g=this}(g.lunr || (g.lunr = {})).wordcut = f()}})(function(){var define,module,exports;return (function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o 1; - }) - this.addWords(words, false) - } - if(finalize){ - this.finalizeDict(); - } - }, - - dictSeek: function (l, r, ch, strOffset, pos) { - var ans = null; - while (l <= r) { - var m = Math.floor((l + r) / 2), - dict_item = this.dict[m], - len = dict_item.length; - if (len <= strOffset) { - l = m + 1; - } else { - var ch_ = 
dict_item[strOffset]; - if (ch_ < ch) { - l = m + 1; - } else if (ch_ > ch) { - r = m - 1; - } else { - ans = m; - if (pos == LEFT) { - r = m - 1; - } else { - l = m + 1; - } - } - } - } - return ans; - }, - - isFinal: function (acceptor) { - return this.dict[acceptor.l].length == acceptor.strOffset; - }, - - createAcceptor: function () { - return { - l: 0, - r: this.dict.length - 1, - strOffset: 0, - isFinal: false, - dict: this, - transit: function (ch) { - return this.dict.transit(this, ch); - }, - isError: false, - tag: "DICT", - w: 1, - type: "DICT" - }; - }, - - transit: function (acceptor, ch) { - var l = this.dictSeek(acceptor.l, - acceptor.r, - ch, - acceptor.strOffset, - LEFT); - if (l !== null) { - var r = this.dictSeek(l, - acceptor.r, - ch, - acceptor.strOffset, - RIGHT); - acceptor.l = l; - acceptor.r = r; - acceptor.strOffset++; - acceptor.isFinal = this.isFinal(acceptor); - } else { - acceptor.isError = true; - } - return acceptor; - }, - - sortuniq: function(a){ - return a.sort().filter(function(item, pos, arr){ - return !pos || item != arr[pos - 1]; - }) - }, - - flatten: function(a){ - //[[1,2],[3]] -> [1,2,3] - return [].concat.apply([], a); - } -}; -module.exports = WordcutDict; - -}).call(this,"/dist/tmp") -},{"glob":16,"path":22}],3:[function(require,module,exports){ -var WordRule = { - createAcceptor: function(tag) { - if (tag["WORD_RULE"]) - return null; - - return {strOffset: 0, - isFinal: false, - transit: function(ch) { - var lch = ch.toLowerCase(); - if (lch >= "a" && lch <= "z") { - this.isFinal = true; - this.strOffset++; - } else { - this.isError = true; - } - return this; - }, - isError: false, - tag: "WORD_RULE", - type: "WORD_RULE", - w: 1}; - } -}; - -var NumberRule = { - createAcceptor: function(tag) { - if (tag["NUMBER_RULE"]) - return null; - - return {strOffset: 0, - isFinal: false, - transit: function(ch) { - if (ch >= "0" && ch <= "9") { - this.isFinal = true; - this.strOffset++; - } else { - this.isError = true; - } - 
return this; - }, - isError: false, - tag: "NUMBER_RULE", - type: "NUMBER_RULE", - w: 1}; - } -}; - -var SpaceRule = { - tag: "SPACE_RULE", - createAcceptor: function(tag) { - - if (tag["SPACE_RULE"]) - return null; - - return {strOffset: 0, - isFinal: false, - transit: function(ch) { - if (ch == " " || ch == "\t" || ch == "\r" || ch == "\n" || - ch == "\u00A0" || ch=="\u2003"//nbsp and emsp - ) { - this.isFinal = true; - this.strOffset++; - } else { - this.isError = true; - } - return this; - }, - isError: false, - tag: SpaceRule.tag, - w: 1, - type: "SPACE_RULE"}; - } -} - -var SingleSymbolRule = { - tag: "SINSYM", - createAcceptor: function(tag) { - return {strOffset: 0, - isFinal: false, - transit: function(ch) { - if (this.strOffset == 0 && ch.match(/^[\@\(\)\/\,\-\."`]$/)) { - this.isFinal = true; - this.strOffset++; - } else { - this.isError = true; - } - return this; - }, - isError: false, - tag: "SINSYM", - w: 1, - type: "SINSYM"}; - } -} - - -var LatinRules = [WordRule, SpaceRule, SingleSymbolRule, NumberRule]; - -module.exports = LatinRules; - -},{}],4:[function(require,module,exports){ -var _ = require("underscore") - , WordcutCore = require("./wordcut_core"); -var PathInfoBuilder = { - - /* - buildByPartAcceptors: function(path, acceptors, i) { - var - var genInfos = partAcceptors.reduce(function(genInfos, acceptor) { - - }, []); - - return genInfos; - } - */ - - buildByAcceptors: function(path, finalAcceptors, i) { - var self = this; - var infos = finalAcceptors.map(function(acceptor) { - var p = i - acceptor.strOffset + 1 - , _info = path[p]; - - var info = {p: p, - mw: _info.mw + (acceptor.mw === undefined ? 0 : acceptor.mw), - w: acceptor.w + _info.w, - unk: (acceptor.unk ? 
acceptor.unk : 0) + _info.unk, - type: acceptor.type}; - - if (acceptor.type == "PART") { - for(var j = p + 1; j <= i; j++) { - path[j].merge = p; - } - info.merge = p; - } - - return info; - }); - return infos.filter(function(info) { return info; }); - }, - - fallback: function(path, leftBoundary, text, i) { - var _info = path[leftBoundary]; - if (text[i].match(/[\u0E48-\u0E4E]/)) { - if (leftBoundary != 0) - leftBoundary = path[leftBoundary].p; - return {p: leftBoundary, - mw: 0, - w: 1 + _info.w, - unk: 1 + _info.unk, - type: "UNK"}; -/* } else if(leftBoundary > 0 && path[leftBoundary].type !== "UNK") { - leftBoundary = path[leftBoundary].p; - return {p: leftBoundary, - w: 1 + _info.w, - unk: 1 + _info.unk, - type: "UNK"}; */ - } else { - return {p: leftBoundary, - mw: _info.mw, - w: 1 + _info.w, - unk: 1 + _info.unk, - type: "UNK"}; - } - }, - - build: function(path, finalAcceptors, i, leftBoundary, text) { - var basicPathInfos = this.buildByAcceptors(path, finalAcceptors, i); - if (basicPathInfos.length > 0) { - return basicPathInfos; - } else { - return [this.fallback(path, leftBoundary, text, i)]; - } - } -}; - -module.exports = function() { - return _.clone(PathInfoBuilder); -} - -},{"./wordcut_core":8,"underscore":25}],5:[function(require,module,exports){ -var _ = require("underscore"); - - -var PathSelector = { - selectPath: function(paths) { - var path = paths.reduce(function(selectedPath, path) { - if (selectedPath == null) { - return path; - } else { - if (path.unk < selectedPath.unk) - return path; - if (path.unk == selectedPath.unk) { - if (path.mw < selectedPath.mw) - return path - if (path.mw == selectedPath.mw) { - if (path.w < selectedPath.w) - return path; - } - } - return selectedPath; - } - }, null); - return path; - }, - - createPath: function() { - return [{p:null, w:0, unk:0, type: "INIT", mw:0}]; - } -}; - -module.exports = function() { - return _.clone(PathSelector); -}; - -},{"underscore":25}],6:[function(require,module,exports){ 
-function isMatch(pat, offset, ch) { - if (pat.length <= offset) - return false; - var _ch = pat[offset]; - return _ch == ch || - (_ch.match(/[กข]/) && ch.match(/[ก-ฮ]/)) || - (_ch.match(/[มบ]/) && ch.match(/[ก-ฮ]/)) || - (_ch.match(/\u0E49/) && ch.match(/[\u0E48-\u0E4B]/)); -} - -var Rule0 = { - pat: "เหก็ม", - createAcceptor: function(tag) { - return {strOffset: 0, - isFinal: false, - transit: function(ch) { - if (isMatch(Rule0.pat, this.strOffset,ch)) { - this.isFinal = (this.strOffset + 1 == Rule0.pat.length); - this.strOffset++; - } else { - this.isError = true; - } - return this; - }, - isError: false, - tag: "THAI_RULE", - type: "THAI_RULE", - w: 1}; - } -}; - -var PartRule = { - createAcceptor: function(tag) { - return {strOffset: 0, - patterns: [ - "แก", "เก", "ก้", "กก์", "กา", "กี", "กิ", "กืก" - ], - isFinal: false, - transit: function(ch) { - var offset = this.strOffset; - this.patterns = this.patterns.filter(function(pat) { - return isMatch(pat, offset, ch); - }); - - if (this.patterns.length > 0) { - var len = 1 + offset; - this.isFinal = this.patterns.some(function(pat) { - return pat.length == len; - }); - this.strOffset++; - } else { - this.isError = true; - } - return this; - }, - isError: false, - tag: "PART", - type: "PART", - unk: 1, - w: 1}; - } -}; - -var ThaiRules = [Rule0, PartRule]; - -module.exports = ThaiRules; - -},{}],7:[function(require,module,exports){ -var sys = require("sys") - , WordcutDict = require("./dict") - , WordcutCore = require("./wordcut_core") - , PathInfoBuilder = require("./path_info_builder") - , PathSelector = require("./path_selector") - , Acceptors = require("./acceptors") - , latinRules = require("./latin_rules") - , thaiRules = require("./thai_rules") - , _ = require("underscore"); - - -var Wordcut = Object.create(WordcutCore); -Wordcut.defaultPathInfoBuilder = PathInfoBuilder; -Wordcut.defaultPathSelector = PathSelector; -Wordcut.defaultAcceptors = Acceptors; -Wordcut.defaultLatinRules = latinRules; 
-Wordcut.defaultThaiRules = thaiRules; -Wordcut.defaultDict = WordcutDict; - - -Wordcut.initNoDict = function(dict_path) { - var self = this; - self.pathInfoBuilder = new self.defaultPathInfoBuilder; - self.pathSelector = new self.defaultPathSelector; - self.acceptors = new self.defaultAcceptors; - self.defaultLatinRules.forEach(function(rule) { - self.acceptors.creators.push(rule); - }); - self.defaultThaiRules.forEach(function(rule) { - self.acceptors.creators.push(rule); - }); -}; - -Wordcut.init = function(dict_path, withDefault, additionalWords) { - withDefault = withDefault || false; - this.initNoDict(); - var dict = _.clone(this.defaultDict); - dict.init(dict_path, withDefault, additionalWords); - this.acceptors.creators.push(dict); -}; - -module.exports = Wordcut; - -},{"./acceptors":1,"./dict":2,"./latin_rules":3,"./path_info_builder":4,"./path_selector":5,"./thai_rules":6,"./wordcut_core":8,"sys":28,"underscore":25}],8:[function(require,module,exports){ -var WordcutCore = { - - buildPath: function(text) { - var self = this - , path = self.pathSelector.createPath() - , leftBoundary = 0; - self.acceptors.reset(); - for (var i = 0; i < text.length; i++) { - var ch = text[i]; - self.acceptors.transit(ch); - - var possiblePathInfos = self - .pathInfoBuilder - .build(path, - self.acceptors.getFinalAcceptors(), - i, - leftBoundary, - text); - var selectedPath = self.pathSelector.selectPath(possiblePathInfos) - - path.push(selectedPath); - if (selectedPath.type !== "UNK") { - leftBoundary = i; - } - } - return path; - }, - - pathToRanges: function(path) { - var e = path.length - 1 - , ranges = []; - - while (e > 0) { - var info = path[e] - , s = info.p; - - if (info.merge !== undefined && ranges.length > 0) { - var r = ranges[ranges.length - 1]; - r.s = info.merge; - s = r.s; - } else { - ranges.push({s:s, e:e}); - } - e = s; - } - return ranges.reverse(); - }, - - rangesToText: function(text, ranges, delimiter) { - return ranges.map(function(r) { - return 
text.substring(r.s, r.e); - }).join(delimiter); - }, - - cut: function(text, delimiter) { - var path = this.buildPath(text) - , ranges = this.pathToRanges(path); - return this - .rangesToText(text, ranges, - (delimiter === undefined ? "|" : delimiter)); - }, - - cutIntoRanges: function(text, noText) { - var path = this.buildPath(text) - , ranges = this.pathToRanges(path); - - if (!noText) { - ranges.forEach(function(r) { - r.text = text.substring(r.s, r.e); - }); - } - return ranges; - }, - - cutIntoArray: function(text) { - var path = this.buildPath(text) - , ranges = this.pathToRanges(path); - - return ranges.map(function(r) { - return text.substring(r.s, r.e) - }); - } -}; - -module.exports = WordcutCore; - -},{}],9:[function(require,module,exports){ -// http://wiki.commonjs.org/wiki/Unit_Testing/1.0 -// -// THIS IS NOT TESTED NOR LIKELY TO WORK OUTSIDE V8! -// -// Originally from narwhal.js (http://narwhaljs.org) -// Copyright (c) 2009 Thomas Robinson <280north.com> -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the 'Software'), to -// deal in the Software without restriction, including without limitation the -// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -// sell copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -// ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -// when used in node, this will actually load the util module we depend on -// versus loading the builtin util module as happens otherwise -// this is a bug in node module loading as far as I am concerned -var util = require('util/'); - -var pSlice = Array.prototype.slice; -var hasOwn = Object.prototype.hasOwnProperty; - -// 1. The assert module provides functions that throw -// AssertionError's when particular conditions are not met. The -// assert module must conform to the following interface. - -var assert = module.exports = ok; - -// 2. The AssertionError is defined in assert. -// new assert.AssertionError({ message: message, -// actual: actual, -// expected: expected }) - -assert.AssertionError = function AssertionError(options) { - this.name = 'AssertionError'; - this.actual = options.actual; - this.expected = options.expected; - this.operator = options.operator; - if (options.message) { - this.message = options.message; - this.generatedMessage = false; - } else { - this.message = getMessage(this); - this.generatedMessage = true; - } - var stackStartFunction = options.stackStartFunction || fail; - - if (Error.captureStackTrace) { - Error.captureStackTrace(this, stackStartFunction); - } - else { - // non v8 browsers so we can have a stacktrace - var err = new Error(); - if (err.stack) { - var out = err.stack; - - // try to strip useless frames - var fn_name = stackStartFunction.name; - var idx = out.indexOf('\n' + fn_name); - if (idx >= 0) { - // once we have located the function frame - // we need to strip out everything before it (and its line) - var next_line = out.indexOf('\n', idx + 1); - out = out.substring(next_line + 1); - } - - this.stack = out; - } - } -}; - -// assert.AssertionError instanceof Error 
-util.inherits(assert.AssertionError, Error); - -function replacer(key, value) { - if (util.isUndefined(value)) { - return '' + value; - } - if (util.isNumber(value) && !isFinite(value)) { - return value.toString(); - } - if (util.isFunction(value) || util.isRegExp(value)) { - return value.toString(); - } - return value; -} - -function truncate(s, n) { - if (util.isString(s)) { - return s.length < n ? s : s.slice(0, n); - } else { - return s; - } -} - -function getMessage(self) { - return truncate(JSON.stringify(self.actual, replacer), 128) + ' ' + - self.operator + ' ' + - truncate(JSON.stringify(self.expected, replacer), 128); -} - -// At present only the three keys mentioned above are used and -// understood by the spec. Implementations or sub modules can pass -// other keys to the AssertionError's constructor - they will be -// ignored. - -// 3. All of the following functions must throw an AssertionError -// when a corresponding condition is not met, with a message that -// may be undefined if not provided. All assertion methods provide -// both the actual and expected values to the assertion error for -// display purposes. - -function fail(actual, expected, message, operator, stackStartFunction) { - throw new assert.AssertionError({ - message: message, - actual: actual, - expected: expected, - operator: operator, - stackStartFunction: stackStartFunction - }); -} - -// EXTENSION! allows for well behaved errors defined elsewhere. -assert.fail = fail; - -// 4. Pure assertion tests whether a value is truthy, as determined -// by !!guard. -// assert.ok(guard, message_opt); -// This statement is equivalent to assert.equal(true, !!guard, -// message_opt);. To test strictly for the value true, use -// assert.strictEqual(true, guard, message_opt);. - -function ok(value, message) { - if (!value) fail(value, true, message, '==', assert.ok); -} -assert.ok = ok; - -// 5. The equality assertion tests shallow, coercive equality with -// ==. 
-// assert.equal(actual, expected, message_opt); - -assert.equal = function equal(actual, expected, message) { - if (actual != expected) fail(actual, expected, message, '==', assert.equal); -}; - -// 6. The non-equality assertion tests for whether two objects are not equal -// with != assert.notEqual(actual, expected, message_opt); - -assert.notEqual = function notEqual(actual, expected, message) { - if (actual == expected) { - fail(actual, expected, message, '!=', assert.notEqual); - } -}; - -// 7. The equivalence assertion tests a deep equality relation. -// assert.deepEqual(actual, expected, message_opt); - -assert.deepEqual = function deepEqual(actual, expected, message) { - if (!_deepEqual(actual, expected)) { - fail(actual, expected, message, 'deepEqual', assert.deepEqual); - } -}; - -function _deepEqual(actual, expected) { - // 7.1. All identical values are equivalent, as determined by ===. - if (actual === expected) { - return true; - - } else if (util.isBuffer(actual) && util.isBuffer(expected)) { - if (actual.length != expected.length) return false; - - for (var i = 0; i < actual.length; i++) { - if (actual[i] !== expected[i]) return false; - } - - return true; - - // 7.2. If the expected value is a Date object, the actual value is - // equivalent if it is also a Date object that refers to the same time. - } else if (util.isDate(actual) && util.isDate(expected)) { - return actual.getTime() === expected.getTime(); - - // 7.3 If the expected value is a RegExp object, the actual value is - // equivalent if it is also a RegExp object with the same source and - // properties (`global`, `multiline`, `lastIndex`, `ignoreCase`). - } else if (util.isRegExp(actual) && util.isRegExp(expected)) { - return actual.source === expected.source && - actual.global === expected.global && - actual.multiline === expected.multiline && - actual.lastIndex === expected.lastIndex && - actual.ignoreCase === expected.ignoreCase; - - // 7.4. 
Other pairs that do not both pass typeof value == 'object', - // equivalence is determined by ==. - } else if (!util.isObject(actual) && !util.isObject(expected)) { - return actual == expected; - - // 7.5 For all other Object pairs, including Array objects, equivalence is - // determined by having the same number of owned properties (as verified - // with Object.prototype.hasOwnProperty.call), the same set of keys - // (although not necessarily the same order), equivalent values for every - // corresponding key, and an identical 'prototype' property. Note: this - // accounts for both named and indexed properties on Arrays. - } else { - return objEquiv(actual, expected); - } -} - -function isArguments(object) { - return Object.prototype.toString.call(object) == '[object Arguments]'; -} - -function objEquiv(a, b) { - if (util.isNullOrUndefined(a) || util.isNullOrUndefined(b)) - return false; - // an identical 'prototype' property. - if (a.prototype !== b.prototype) return false; - // if one is a primitive, the other must be same - if (util.isPrimitive(a) || util.isPrimitive(b)) { - return a === b; - } - var aIsArgs = isArguments(a), - bIsArgs = isArguments(b); - if ((aIsArgs && !bIsArgs) || (!aIsArgs && bIsArgs)) - return false; - if (aIsArgs) { - a = pSlice.call(a); - b = pSlice.call(b); - return _deepEqual(a, b); - } - var ka = objectKeys(a), - kb = objectKeys(b), - key, i; - // having the same number of owned properties (keys incorporates - // hasOwnProperty) - if (ka.length != kb.length) - return false; - //the same set of keys (although not necessarily the same order), - ka.sort(); - kb.sort(); - //~~~cheap key test - for (i = ka.length - 1; i >= 0; i--) { - if (ka[i] != kb[i]) - return false; - } - //equivalent values for every corresponding key, and - //~~~possibly expensive deep test - for (i = ka.length - 1; i >= 0; i--) { - key = ka[i]; - if (!_deepEqual(a[key], b[key])) return false; - } - return true; -} - -// 8. 
The non-equivalence assertion tests for any deep inequality. -// assert.notDeepEqual(actual, expected, message_opt); - -assert.notDeepEqual = function notDeepEqual(actual, expected, message) { - if (_deepEqual(actual, expected)) { - fail(actual, expected, message, 'notDeepEqual', assert.notDeepEqual); - } -}; - -// 9. The strict equality assertion tests strict equality, as determined by ===. -// assert.strictEqual(actual, expected, message_opt); - -assert.strictEqual = function strictEqual(actual, expected, message) { - if (actual !== expected) { - fail(actual, expected, message, '===', assert.strictEqual); - } -}; - -// 10. The strict non-equality assertion tests for strict inequality, as -// determined by !==. assert.notStrictEqual(actual, expected, message_opt); - -assert.notStrictEqual = function notStrictEqual(actual, expected, message) { - if (actual === expected) { - fail(actual, expected, message, '!==', assert.notStrictEqual); - } -}; - -function expectedException(actual, expected) { - if (!actual || !expected) { - return false; - } - - if (Object.prototype.toString.call(expected) == '[object RegExp]') { - return expected.test(actual); - } else if (actual instanceof expected) { - return true; - } else if (expected.call({}, actual) === true) { - return true; - } - - return false; -} - -function _throws(shouldThrow, block, expected, message) { - var actual; - - if (util.isString(expected)) { - message = expected; - expected = null; - } - - try { - block(); - } catch (e) { - actual = e; - } - - message = (expected && expected.name ? ' (' + expected.name + ').' : '.') + - (message ? 
' ' + message : '.'); - - if (shouldThrow && !actual) { - fail(actual, expected, 'Missing expected exception' + message); - } - - if (!shouldThrow && expectedException(actual, expected)) { - fail(actual, expected, 'Got unwanted exception' + message); - } - - if ((shouldThrow && actual && expected && - !expectedException(actual, expected)) || (!shouldThrow && actual)) { - throw actual; - } -} - -// 11. Expected to throw an error: -// assert.throws(block, Error_opt, message_opt); - -assert.throws = function(block, /*optional*/error, /*optional*/message) { - _throws.apply(this, [true].concat(pSlice.call(arguments))); -}; - -// EXTENSION! This is annoying to write outside this module. -assert.doesNotThrow = function(block, /*optional*/message) { - _throws.apply(this, [false].concat(pSlice.call(arguments))); -}; - -assert.ifError = function(err) { if (err) {throw err;}}; - -var objectKeys = Object.keys || function (obj) { - var keys = []; - for (var key in obj) { - if (hasOwn.call(obj, key)) keys.push(key); - } - return keys; -}; - -},{"util/":28}],10:[function(require,module,exports){ -'use strict'; -module.exports = balanced; -function balanced(a, b, str) { - if (a instanceof RegExp) a = maybeMatch(a, str); - if (b instanceof RegExp) b = maybeMatch(b, str); - - var r = range(a, b, str); - - return r && { - start: r[0], - end: r[1], - pre: str.slice(0, r[0]), - body: str.slice(r[0] + a.length, r[1]), - post: str.slice(r[1] + b.length) - }; -} - -function maybeMatch(reg, str) { - var m = str.match(reg); - return m ? 
m[0] : null; -} - -balanced.range = range; -function range(a, b, str) { - var begs, beg, left, right, result; - var ai = str.indexOf(a); - var bi = str.indexOf(b, ai + 1); - var i = ai; - - if (ai >= 0 && bi > 0) { - begs = []; - left = str.length; - - while (i >= 0 && !result) { - if (i == ai) { - begs.push(i); - ai = str.indexOf(a, i + 1); - } else if (begs.length == 1) { - result = [ begs.pop(), bi ]; - } else { - beg = begs.pop(); - if (beg < left) { - left = beg; - right = bi; - } - - bi = str.indexOf(b, i + 1); - } - - i = ai < bi && ai >= 0 ? ai : bi; - } - - if (begs.length) { - result = [ left, right ]; - } - } - - return result; -} - -},{}],11:[function(require,module,exports){ -var concatMap = require('concat-map'); -var balanced = require('balanced-match'); - -module.exports = expandTop; - -var escSlash = '\0SLASH'+Math.random()+'\0'; -var escOpen = '\0OPEN'+Math.random()+'\0'; -var escClose = '\0CLOSE'+Math.random()+'\0'; -var escComma = '\0COMMA'+Math.random()+'\0'; -var escPeriod = '\0PERIOD'+Math.random()+'\0'; - -function numeric(str) { - return parseInt(str, 10) == str - ? 
parseInt(str, 10) - : str.charCodeAt(0); -} - -function escapeBraces(str) { - return str.split('\\\\').join(escSlash) - .split('\\{').join(escOpen) - .split('\\}').join(escClose) - .split('\\,').join(escComma) - .split('\\.').join(escPeriod); -} - -function unescapeBraces(str) { - return str.split(escSlash).join('\\') - .split(escOpen).join('{') - .split(escClose).join('}') - .split(escComma).join(',') - .split(escPeriod).join('.'); -} - - -// Basically just str.split(","), but handling cases -// where we have nested braced sections, which should be -// treated as individual members, like {a,{b,c},d} -function parseCommaParts(str) { - if (!str) - return ['']; - - var parts = []; - var m = balanced('{', '}', str); - - if (!m) - return str.split(','); - - var pre = m.pre; - var body = m.body; - var post = m.post; - var p = pre.split(','); - - p[p.length-1] += '{' + body + '}'; - var postParts = parseCommaParts(post); - if (post.length) { - p[p.length-1] += postParts.shift(); - p.push.apply(p, postParts); - } - - parts.push.apply(parts, p); - - return parts; -} - -function expandTop(str) { - if (!str) - return []; - - // I don't know why Bash 4.3 does this, but it does. - // Anything starting with {} will have the first two bytes preserved - // but *only* at the top level, so {},a}b will not expand to anything, - // but a{},b}c will be expanded to [a}c,abc]. 
- // One could argue that this is a bug in Bash, but since the goal of - // this module is to match Bash's rules, we escape a leading {} - if (str.substr(0, 2) === '{}') { - str = '\\{\\}' + str.substr(2); - } - - return expand(escapeBraces(str), true).map(unescapeBraces); -} - -function identity(e) { - return e; -} - -function embrace(str) { - return '{' + str + '}'; -} -function isPadded(el) { - return /^-?0\d/.test(el); -} - -function lte(i, y) { - return i <= y; -} -function gte(i, y) { - return i >= y; -} - -function expand(str, isTop) { - var expansions = []; - - var m = balanced('{', '}', str); - if (!m || /\$$/.test(m.pre)) return [str]; - - var isNumericSequence = /^-?\d+\.\.-?\d+(?:\.\.-?\d+)?$/.test(m.body); - var isAlphaSequence = /^[a-zA-Z]\.\.[a-zA-Z](?:\.\.-?\d+)?$/.test(m.body); - var isSequence = isNumericSequence || isAlphaSequence; - var isOptions = m.body.indexOf(',') >= 0; - if (!isSequence && !isOptions) { - // {a},b} - if (m.post.match(/,.*\}/)) { - str = m.pre + '{' + m.body + escClose + m.post; - return expand(str); - } - return [str]; - } - - var n; - if (isSequence) { - n = m.body.split(/\.\./); - } else { - n = parseCommaParts(m.body); - if (n.length === 1) { - // x{{a,b}}y ==> x{a}y x{b}y - n = expand(n[0], false).map(embrace); - if (n.length === 1) { - var post = m.post.length - ? expand(m.post, false) - : ['']; - return post.map(function(p) { - return m.pre + n[0] + p; - }); - } - } - } - - // at this point, n is the parts, and we know it's not a comma set - // with a single entry. - - // no need to expand pre, since it is guaranteed to be free of brace-sets - var pre = m.pre; - var post = m.post.length - ? expand(m.post, false) - : ['']; - - var N; - - if (isSequence) { - var x = numeric(n[0]); - var y = numeric(n[1]); - var width = Math.max(n[0].length, n[1].length) - var incr = n.length == 3 - ? 
Math.abs(numeric(n[2])) - : 1; - var test = lte; - var reverse = y < x; - if (reverse) { - incr *= -1; - test = gte; - } - var pad = n.some(isPadded); - - N = []; - - for (var i = x; test(i, y); i += incr) { - var c; - if (isAlphaSequence) { - c = String.fromCharCode(i); - if (c === '\\') - c = ''; - } else { - c = String(i); - if (pad) { - var need = width - c.length; - if (need > 0) { - var z = new Array(need + 1).join('0'); - if (i < 0) - c = '-' + z + c.slice(1); - else - c = z + c; - } - } - } - N.push(c); - } - } else { - N = concatMap(n, function(el) { return expand(el, false) }); - } - - for (var j = 0; j < N.length; j++) { - for (var k = 0; k < post.length; k++) { - var expansion = pre + N[j] + post[k]; - if (!isTop || isSequence || expansion) - expansions.push(expansion); - } - } - - return expansions; -} - - -},{"balanced-match":10,"concat-map":13}],12:[function(require,module,exports){ - -},{}],13:[function(require,module,exports){ -module.exports = function (xs, fn) { - var res = []; - for (var i = 0; i < xs.length; i++) { - var x = fn(xs[i], i); - if (isArray(x)) res.push.apply(res, x); - else res.push(x); - } - return res; -}; - -var isArray = Array.isArray || function (xs) { - return Object.prototype.toString.call(xs) === '[object Array]'; -}; - -},{}],14:[function(require,module,exports){ -// Copyright Joyent, Inc. and other Node contributors. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to permit -// persons to whom the Software is furnished to do so, subject to the -// following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN -// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -// USE OR OTHER DEALINGS IN THE SOFTWARE. - -function EventEmitter() { - this._events = this._events || {}; - this._maxListeners = this._maxListeners || undefined; -} -module.exports = EventEmitter; - -// Backwards-compat with node 0.10.x -EventEmitter.EventEmitter = EventEmitter; - -EventEmitter.prototype._events = undefined; -EventEmitter.prototype._maxListeners = undefined; - -// By default EventEmitters will print a warning if more than 10 listeners are -// added to it. This is a useful default which helps finding memory leaks. -EventEmitter.defaultMaxListeners = 10; - -// Obviously not all Emitters should be limited to 10. This function allows -// that to be increased. Set to zero for unlimited. -EventEmitter.prototype.setMaxListeners = function(n) { - if (!isNumber(n) || n < 0 || isNaN(n)) - throw TypeError('n must be a positive number'); - this._maxListeners = n; - return this; -}; - -EventEmitter.prototype.emit = function(type) { - var er, handler, len, args, i, listeners; - - if (!this._events) - this._events = {}; - - // If there is no 'error' event listener then throw. 
- if (type === 'error') { - if (!this._events.error || - (isObject(this._events.error) && !this._events.error.length)) { - er = arguments[1]; - if (er instanceof Error) { - throw er; // Unhandled 'error' event - } - throw TypeError('Uncaught, unspecified "error" event.'); - } - } - - handler = this._events[type]; - - if (isUndefined(handler)) - return false; - - if (isFunction(handler)) { - switch (arguments.length) { - // fast cases - case 1: - handler.call(this); - break; - case 2: - handler.call(this, arguments[1]); - break; - case 3: - handler.call(this, arguments[1], arguments[2]); - break; - // slower - default: - len = arguments.length; - args = new Array(len - 1); - for (i = 1; i < len; i++) - args[i - 1] = arguments[i]; - handler.apply(this, args); - } - } else if (isObject(handler)) { - len = arguments.length; - args = new Array(len - 1); - for (i = 1; i < len; i++) - args[i - 1] = arguments[i]; - - listeners = handler.slice(); - len = listeners.length; - for (i = 0; i < len; i++) - listeners[i].apply(this, args); - } - - return true; -}; - -EventEmitter.prototype.addListener = function(type, listener) { - var m; - - if (!isFunction(listener)) - throw TypeError('listener must be a function'); - - if (!this._events) - this._events = {}; - - // To avoid recursion in the case that type === "newListener"! Before - // adding it to the listeners, first emit "newListener". - if (this._events.newListener) - this.emit('newListener', type, - isFunction(listener.listener) ? - listener.listener : listener); - - if (!this._events[type]) - // Optimize the case of one listener. Don't need the extra array object. - this._events[type] = listener; - else if (isObject(this._events[type])) - // If we've already got an array, just append. - this._events[type].push(listener); - else - // Adding the second element, need to change to array. 
- this._events[type] = [this._events[type], listener]; - - // Check for listener leak - if (isObject(this._events[type]) && !this._events[type].warned) { - var m; - if (!isUndefined(this._maxListeners)) { - m = this._maxListeners; - } else { - m = EventEmitter.defaultMaxListeners; - } - - if (m && m > 0 && this._events[type].length > m) { - this._events[type].warned = true; - console.error('(node) warning: possible EventEmitter memory ' + - 'leak detected. %d listeners added. ' + - 'Use emitter.setMaxListeners() to increase limit.', - this._events[type].length); - if (typeof console.trace === 'function') { - // not supported in IE 10 - console.trace(); - } - } - } - - return this; -}; - -EventEmitter.prototype.on = EventEmitter.prototype.addListener; - -EventEmitter.prototype.once = function(type, listener) { - if (!isFunction(listener)) - throw TypeError('listener must be a function'); - - var fired = false; - - function g() { - this.removeListener(type, g); - - if (!fired) { - fired = true; - listener.apply(this, arguments); - } - } - - g.listener = listener; - this.on(type, g); - - return this; -}; - -// emits a 'removeListener' event iff the listener was removed -EventEmitter.prototype.removeListener = function(type, listener) { - var list, position, length, i; - - if (!isFunction(listener)) - throw TypeError('listener must be a function'); - - if (!this._events || !this._events[type]) - return this; - - list = this._events[type]; - length = list.length; - position = -1; - - if (list === listener || - (isFunction(list.listener) && list.listener === listener)) { - delete this._events[type]; - if (this._events.removeListener) - this.emit('removeListener', type, listener); - - } else if (isObject(list)) { - for (i = length; i-- > 0;) { - if (list[i] === listener || - (list[i].listener && list[i].listener === listener)) { - position = i; - break; - } - } - - if (position < 0) - return this; - - if (list.length === 1) { - list.length = 0; - delete 
this._events[type]; - } else { - list.splice(position, 1); - } - - if (this._events.removeListener) - this.emit('removeListener', type, listener); - } - - return this; -}; - -EventEmitter.prototype.removeAllListeners = function(type) { - var key, listeners; - - if (!this._events) - return this; - - // not listening for removeListener, no need to emit - if (!this._events.removeListener) { - if (arguments.length === 0) - this._events = {}; - else if (this._events[type]) - delete this._events[type]; - return this; - } - - // emit removeListener for all listeners on all events - if (arguments.length === 0) { - for (key in this._events) { - if (key === 'removeListener') continue; - this.removeAllListeners(key); - } - this.removeAllListeners('removeListener'); - this._events = {}; - return this; - } - - listeners = this._events[type]; - - if (isFunction(listeners)) { - this.removeListener(type, listeners); - } else { - // LIFO order - while (listeners.length) - this.removeListener(type, listeners[listeners.length - 1]); - } - delete this._events[type]; - - return this; -}; - -EventEmitter.prototype.listeners = function(type) { - var ret; - if (!this._events || !this._events[type]) - ret = []; - else if (isFunction(this._events[type])) - ret = [this._events[type]]; - else - ret = this._events[type].slice(); - return ret; -}; - -EventEmitter.listenerCount = function(emitter, type) { - var ret; - if (!emitter._events || !emitter._events[type]) - ret = 0; - else if (isFunction(emitter._events[type])) - ret = 1; - else - ret = emitter._events[type].length; - return ret; -}; - -function isFunction(arg) { - return typeof arg === 'function'; -} - -function isNumber(arg) { - return typeof arg === 'number'; -} - -function isObject(arg) { - return typeof arg === 'object' && arg !== null; -} - -function isUndefined(arg) { - return arg === void 0; -} - -},{}],15:[function(require,module,exports){ -(function (process){ -exports.alphasort = alphasort -exports.alphasorti = alphasorti 
-exports.setopts = setopts -exports.ownProp = ownProp -exports.makeAbs = makeAbs -exports.finish = finish -exports.mark = mark -exports.isIgnored = isIgnored -exports.childrenIgnored = childrenIgnored - -function ownProp (obj, field) { - return Object.prototype.hasOwnProperty.call(obj, field) -} - -var path = require("path") -var minimatch = require("minimatch") -var isAbsolute = require("path-is-absolute") -var Minimatch = minimatch.Minimatch - -function alphasorti (a, b) { - return a.toLowerCase().localeCompare(b.toLowerCase()) -} - -function alphasort (a, b) { - return a.localeCompare(b) -} - -function setupIgnores (self, options) { - self.ignore = options.ignore || [] - - if (!Array.isArray(self.ignore)) - self.ignore = [self.ignore] - - if (self.ignore.length) { - self.ignore = self.ignore.map(ignoreMap) - } -} - -function ignoreMap (pattern) { - var gmatcher = null - if (pattern.slice(-3) === '/**') { - var gpattern = pattern.replace(/(\/\*\*)+$/, '') - gmatcher = new Minimatch(gpattern) - } - - return { - matcher: new Minimatch(pattern), - gmatcher: gmatcher - } -} - -function setopts (self, pattern, options) { - if (!options) - options = {} - - // base-matching: just use globstar for that. 
- if (options.matchBase && -1 === pattern.indexOf("/")) { - if (options.noglobstar) { - throw new Error("base matching requires globstar") - } - pattern = "**/" + pattern - } - - self.silent = !!options.silent - self.pattern = pattern - self.strict = options.strict !== false - self.realpath = !!options.realpath - self.realpathCache = options.realpathCache || Object.create(null) - self.follow = !!options.follow - self.dot = !!options.dot - self.mark = !!options.mark - self.nodir = !!options.nodir - if (self.nodir) - self.mark = true - self.sync = !!options.sync - self.nounique = !!options.nounique - self.nonull = !!options.nonull - self.nosort = !!options.nosort - self.nocase = !!options.nocase - self.stat = !!options.stat - self.noprocess = !!options.noprocess - - self.maxLength = options.maxLength || Infinity - self.cache = options.cache || Object.create(null) - self.statCache = options.statCache || Object.create(null) - self.symlinks = options.symlinks || Object.create(null) - - setupIgnores(self, options) - - self.changedCwd = false - var cwd = process.cwd() - if (!ownProp(options, "cwd")) - self.cwd = cwd - else { - self.cwd = options.cwd - self.changedCwd = path.resolve(options.cwd) !== cwd - } - - self.root = options.root || path.resolve(self.cwd, "/") - self.root = path.resolve(self.root) - if (process.platform === "win32") - self.root = self.root.replace(/\\/g, "/") - - self.nomount = !!options.nomount - - // disable comments and negation unless the user explicitly - // passes in false as the option. - options.nonegate = options.nonegate === false ? false : true - options.nocomment = options.nocomment === false ? 
false : true - deprecationWarning(options) - - self.minimatch = new Minimatch(pattern, options) - self.options = self.minimatch.options -} - -// TODO(isaacs): remove entirely in v6 -// exported to reset in tests -exports.deprecationWarned -function deprecationWarning(options) { - if (!options.nonegate || !options.nocomment) { - if (process.noDeprecation !== true && !exports.deprecationWarned) { - var msg = 'glob WARNING: comments and negation will be disabled in v6' - if (process.throwDeprecation) - throw new Error(msg) - else if (process.traceDeprecation) - console.trace(msg) - else - console.error(msg) - - exports.deprecationWarned = true - } - } -} - -function finish (self) { - var nou = self.nounique - var all = nou ? [] : Object.create(null) - - for (var i = 0, l = self.matches.length; i < l; i ++) { - var matches = self.matches[i] - if (!matches || Object.keys(matches).length === 0) { - if (self.nonull) { - // do like the shell, and spit out the literal glob - var literal = self.minimatch.globSet[i] - if (nou) - all.push(literal) - else - all[literal] = true - } - } else { - // had matches - var m = Object.keys(matches) - if (nou) - all.push.apply(all, m) - else - m.forEach(function (m) { - all[m] = true - }) - } - } - - if (!nou) - all = Object.keys(all) - - if (!self.nosort) - all = all.sort(self.nocase ? 
alphasorti : alphasort) - - // at *some* point we statted all of these - if (self.mark) { - for (var i = 0; i < all.length; i++) { - all[i] = self._mark(all[i]) - } - if (self.nodir) { - all = all.filter(function (e) { - return !(/\/$/.test(e)) - }) - } - } - - if (self.ignore.length) - all = all.filter(function(m) { - return !isIgnored(self, m) - }) - - self.found = all -} - -function mark (self, p) { - var abs = makeAbs(self, p) - var c = self.cache[abs] - var m = p - if (c) { - var isDir = c === 'DIR' || Array.isArray(c) - var slash = p.slice(-1) === '/' - - if (isDir && !slash) - m += '/' - else if (!isDir && slash) - m = m.slice(0, -1) - - if (m !== p) { - var mabs = makeAbs(self, m) - self.statCache[mabs] = self.statCache[abs] - self.cache[mabs] = self.cache[abs] - } - } - - return m -} - -// lotta situps... -function makeAbs (self, f) { - var abs = f - if (f.charAt(0) === '/') { - abs = path.join(self.root, f) - } else if (isAbsolute(f) || f === '') { - abs = f - } else if (self.changedCwd) { - abs = path.resolve(self.cwd, f) - } else { - abs = path.resolve(f) - } - return abs -} - - -// Return true, if pattern ends with globstar '**', for the accompanying parent directory. -// Ex:- If node_modules/** is the pattern, add 'node_modules' to ignore list along with it's contents -function isIgnored (self, path) { - if (!self.ignore.length) - return false - - return self.ignore.some(function(item) { - return item.matcher.match(path) || !!(item.gmatcher && item.gmatcher.match(path)) - }) -} - -function childrenIgnored (self, path) { - if (!self.ignore.length) - return false - - return self.ignore.some(function(item) { - return !!(item.gmatcher && item.gmatcher.match(path)) - }) -} - -}).call(this,require('_process')) -},{"_process":24,"minimatch":20,"path":22,"path-is-absolute":23}],16:[function(require,module,exports){ -(function (process){ -// Approach: -// -// 1. Get the minimatch set -// 2. For each pattern in the set, PROCESS(pattern, false) -// 3. 
Store matches per-set, then uniq them -// -// PROCESS(pattern, inGlobStar) -// Get the first [n] items from pattern that are all strings -// Join these together. This is PREFIX. -// If there is no more remaining, then stat(PREFIX) and -// add to matches if it succeeds. END. -// -// If inGlobStar and PREFIX is symlink and points to dir -// set ENTRIES = [] -// else readdir(PREFIX) as ENTRIES -// If fail, END -// -// with ENTRIES -// If pattern[n] is GLOBSTAR -// // handle the case where the globstar match is empty -// // by pruning it out, and testing the resulting pattern -// PROCESS(pattern[0..n] + pattern[n+1 .. $], false) -// // handle other cases. -// for ENTRY in ENTRIES (not dotfiles) -// // attach globstar + tail onto the entry -// // Mark that this entry is a globstar match -// PROCESS(pattern[0..n] + ENTRY + pattern[n .. $], true) -// -// else // not globstar -// for ENTRY in ENTRIES (not dotfiles, unless pattern[n] is dot) -// Test ENTRY against pattern[n] -// If fails, continue -// If passes, PROCESS(pattern[0..n] + item + pattern[n+1 .. $]) -// -// Caveat: -// Cache all stats and readdirs results to minimize syscall. Since all -// we ever care about is existence and directory-ness, we can just keep -// `true` for files, and [children,...] for directories, or `false` for -// things that don't exist. 
- -module.exports = glob - -var fs = require('fs') -var minimatch = require('minimatch') -var Minimatch = minimatch.Minimatch -var inherits = require('inherits') -var EE = require('events').EventEmitter -var path = require('path') -var assert = require('assert') -var isAbsolute = require('path-is-absolute') -var globSync = require('./sync.js') -var common = require('./common.js') -var alphasort = common.alphasort -var alphasorti = common.alphasorti -var setopts = common.setopts -var ownProp = common.ownProp -var inflight = require('inflight') -var util = require('util') -var childrenIgnored = common.childrenIgnored -var isIgnored = common.isIgnored - -var once = require('once') - -function glob (pattern, options, cb) { - if (typeof options === 'function') cb = options, options = {} - if (!options) options = {} - - if (options.sync) { - if (cb) - throw new TypeError('callback provided to sync glob') - return globSync(pattern, options) - } - - return new Glob(pattern, options, cb) -} - -glob.sync = globSync -var GlobSync = glob.GlobSync = globSync.GlobSync - -// old api surface -glob.glob = glob - -glob.hasMagic = function (pattern, options_) { - var options = util._extend({}, options_) - options.noprocess = true - - var g = new Glob(pattern, options) - var set = g.minimatch.set - if (set.length > 1) - return true - - for (var j = 0; j < set[0].length; j++) { - if (typeof set[0][j] !== 'string') - return true - } - - return false -} - -glob.Glob = Glob -inherits(Glob, EE) -function Glob (pattern, options, cb) { - if (typeof options === 'function') { - cb = options - options = null - } - - if (options && options.sync) { - if (cb) - throw new TypeError('callback provided to sync glob') - return new GlobSync(pattern, options) - } - - if (!(this instanceof Glob)) - return new Glob(pattern, options, cb) - - setopts(this, pattern, options) - this._didRealPath = false - - // process each pattern in the minimatch set - var n = this.minimatch.set.length - - // The matches are 
stored as {: true,...} so that - // duplicates are automagically pruned. - // Later, we do an Object.keys() on these. - // Keep them as a list so we can fill in when nonull is set. - this.matches = new Array(n) - - if (typeof cb === 'function') { - cb = once(cb) - this.on('error', cb) - this.on('end', function (matches) { - cb(null, matches) - }) - } - - var self = this - var n = this.minimatch.set.length - this._processing = 0 - this.matches = new Array(n) - - this._emitQueue = [] - this._processQueue = [] - this.paused = false - - if (this.noprocess) - return this - - if (n === 0) - return done() - - for (var i = 0; i < n; i ++) { - this._process(this.minimatch.set[i], i, false, done) - } - - function done () { - --self._processing - if (self._processing <= 0) - self._finish() - } -} - -Glob.prototype._finish = function () { - assert(this instanceof Glob) - if (this.aborted) - return - - if (this.realpath && !this._didRealpath) - return this._realpath() - - common.finish(this) - this.emit('end', this.found) -} - -Glob.prototype._realpath = function () { - if (this._didRealpath) - return - - this._didRealpath = true - - var n = this.matches.length - if (n === 0) - return this._finish() - - var self = this - for (var i = 0; i < this.matches.length; i++) - this._realpathSet(i, next) - - function next () { - if (--n === 0) - self._finish() - } -} - -Glob.prototype._realpathSet = function (index, cb) { - var matchset = this.matches[index] - if (!matchset) - return cb() - - var found = Object.keys(matchset) - var self = this - var n = found.length - - if (n === 0) - return cb() - - var set = this.matches[index] = Object.create(null) - found.forEach(function (p, i) { - // If there's a problem with the stat, then it means that - // one or more of the links in the realpath couldn't be - // resolved. just return the abs value in that case. 
- p = self._makeAbs(p) - fs.realpath(p, self.realpathCache, function (er, real) { - if (!er) - set[real] = true - else if (er.syscall === 'stat') - set[p] = true - else - self.emit('error', er) // srsly wtf right here - - if (--n === 0) { - self.matches[index] = set - cb() - } - }) - }) -} - -Glob.prototype._mark = function (p) { - return common.mark(this, p) -} - -Glob.prototype._makeAbs = function (f) { - return common.makeAbs(this, f) -} - -Glob.prototype.abort = function () { - this.aborted = true - this.emit('abort') -} - -Glob.prototype.pause = function () { - if (!this.paused) { - this.paused = true - this.emit('pause') - } -} - -Glob.prototype.resume = function () { - if (this.paused) { - this.emit('resume') - this.paused = false - if (this._emitQueue.length) { - var eq = this._emitQueue.slice(0) - this._emitQueue.length = 0 - for (var i = 0; i < eq.length; i ++) { - var e = eq[i] - this._emitMatch(e[0], e[1]) - } - } - if (this._processQueue.length) { - var pq = this._processQueue.slice(0) - this._processQueue.length = 0 - for (var i = 0; i < pq.length; i ++) { - var p = pq[i] - this._processing-- - this._process(p[0], p[1], p[2], p[3]) - } - } - } -} - -Glob.prototype._process = function (pattern, index, inGlobStar, cb) { - assert(this instanceof Glob) - assert(typeof cb === 'function') - - if (this.aborted) - return - - this._processing++ - if (this.paused) { - this._processQueue.push([pattern, index, inGlobStar, cb]) - return - } - - //console.error('PROCESS %d', this._processing, pattern) - - // Get the first [n] parts of pattern that are all strings. - var n = 0 - while (typeof pattern[n] === 'string') { - n ++ - } - // now n is the index of the first one that is *not* a string. - - // see if there's anything else - var prefix - switch (n) { - // if not, then this is rather simple - case pattern.length: - this._processSimple(pattern.join('/'), index, cb) - return - - case 0: - // pattern *starts* with some non-trivial item. 
- // going to readdir(cwd), but not include the prefix in matches. - prefix = null - break - - default: - // pattern has some string bits in the front. - // whatever it starts with, whether that's 'absolute' like /foo/bar, - // or 'relative' like '../baz' - prefix = pattern.slice(0, n).join('/') - break - } - - var remain = pattern.slice(n) - - // get the list of entries. - var read - if (prefix === null) - read = '.' - else if (isAbsolute(prefix) || isAbsolute(pattern.join('/'))) { - if (!prefix || !isAbsolute(prefix)) - prefix = '/' + prefix - read = prefix - } else - read = prefix - - var abs = this._makeAbs(read) - - //if ignored, skip _processing - if (childrenIgnored(this, read)) - return cb() - - var isGlobStar = remain[0] === minimatch.GLOBSTAR - if (isGlobStar) - this._processGlobStar(prefix, read, abs, remain, index, inGlobStar, cb) - else - this._processReaddir(prefix, read, abs, remain, index, inGlobStar, cb) -} - -Glob.prototype._processReaddir = function (prefix, read, abs, remain, index, inGlobStar, cb) { - var self = this - this._readdir(abs, inGlobStar, function (er, entries) { - return self._processReaddir2(prefix, read, abs, remain, index, inGlobStar, entries, cb) - }) -} - -Glob.prototype._processReaddir2 = function (prefix, read, abs, remain, index, inGlobStar, entries, cb) { - - // if the abs isn't a dir, then nothing can match! - if (!entries) - return cb() - - // It will only match dot entries if it starts with a dot, or if - // dot is set. Stuff like @(.foo|.bar) isn't allowed. - var pn = remain[0] - var negate = !!this.minimatch.negate - var rawGlob = pn._glob - var dotOk = this.dot || rawGlob.charAt(0) === '.' - - var matchedEntries = [] - for (var i = 0; i < entries.length; i++) { - var e = entries[i] - if (e.charAt(0) !== '.' 
|| dotOk) { - var m - if (negate && !prefix) { - m = !e.match(pn) - } else { - m = e.match(pn) - } - if (m) - matchedEntries.push(e) - } - } - - //console.error('prd2', prefix, entries, remain[0]._glob, matchedEntries) - - var len = matchedEntries.length - // If there are no matched entries, then nothing matches. - if (len === 0) - return cb() - - // if this is the last remaining pattern bit, then no need for - // an additional stat *unless* the user has specified mark or - // stat explicitly. We know they exist, since readdir returned - // them. - - if (remain.length === 1 && !this.mark && !this.stat) { - if (!this.matches[index]) - this.matches[index] = Object.create(null) - - for (var i = 0; i < len; i ++) { - var e = matchedEntries[i] - if (prefix) { - if (prefix !== '/') - e = prefix + '/' + e - else - e = prefix + e - } - - if (e.charAt(0) === '/' && !this.nomount) { - e = path.join(this.root, e) - } - this._emitMatch(index, e) - } - // This was the last one, and no stats were needed - return cb() - } - - // now test all matched entries as stand-ins for that part - // of the pattern. 
- remain.shift() - for (var i = 0; i < len; i ++) { - var e = matchedEntries[i] - var newPattern - if (prefix) { - if (prefix !== '/') - e = prefix + '/' + e - else - e = prefix + e - } - this._process([e].concat(remain), index, inGlobStar, cb) - } - cb() -} - -Glob.prototype._emitMatch = function (index, e) { - if (this.aborted) - return - - if (this.matches[index][e]) - return - - if (isIgnored(this, e)) - return - - if (this.paused) { - this._emitQueue.push([index, e]) - return - } - - var abs = this._makeAbs(e) - - if (this.nodir) { - var c = this.cache[abs] - if (c === 'DIR' || Array.isArray(c)) - return - } - - if (this.mark) - e = this._mark(e) - - this.matches[index][e] = true - - var st = this.statCache[abs] - if (st) - this.emit('stat', e, st) - - this.emit('match', e) -} - -Glob.prototype._readdirInGlobStar = function (abs, cb) { - if (this.aborted) - return - - // follow all symlinked directories forever - // just proceed as if this is a non-globstar situation - if (this.follow) - return this._readdir(abs, false, cb) - - var lstatkey = 'lstat\0' + abs - var self = this - var lstatcb = inflight(lstatkey, lstatcb_) - - if (lstatcb) - fs.lstat(abs, lstatcb) - - function lstatcb_ (er, lstat) { - if (er) - return cb() - - var isSym = lstat.isSymbolicLink() - self.symlinks[abs] = isSym - - // If it's not a symlink or a dir, then it's definitely a regular file. - // don't bother doing a readdir in that case. 
- if (!isSym && !lstat.isDirectory()) { - self.cache[abs] = 'FILE' - cb() - } else - self._readdir(abs, false, cb) - } -} - -Glob.prototype._readdir = function (abs, inGlobStar, cb) { - if (this.aborted) - return - - cb = inflight('readdir\0'+abs+'\0'+inGlobStar, cb) - if (!cb) - return - - //console.error('RD %j %j', +inGlobStar, abs) - if (inGlobStar && !ownProp(this.symlinks, abs)) - return this._readdirInGlobStar(abs, cb) - - if (ownProp(this.cache, abs)) { - var c = this.cache[abs] - if (!c || c === 'FILE') - return cb() - - if (Array.isArray(c)) - return cb(null, c) - } - - var self = this - fs.readdir(abs, readdirCb(this, abs, cb)) -} - -function readdirCb (self, abs, cb) { - return function (er, entries) { - if (er) - self._readdirError(abs, er, cb) - else - self._readdirEntries(abs, entries, cb) - } -} - -Glob.prototype._readdirEntries = function (abs, entries, cb) { - if (this.aborted) - return - - // if we haven't asked to stat everything, then just - // assume that everything in there exists, so we can avoid - // having to stat it a second time. - if (!this.mark && !this.stat) { - for (var i = 0; i < entries.length; i ++) { - var e = entries[i] - if (abs === '/') - e = abs + e - else - e = abs + '/' + e - this.cache[e] = true - } - } - - this.cache[abs] = entries - return cb(null, entries) -} - -Glob.prototype._readdirError = function (f, er, cb) { - if (this.aborted) - return - - // handle errors, and cache the information - switch (er.code) { - case 'ENOTSUP': // https://github.com/isaacs/node-glob/issues/205 - case 'ENOTDIR': // totally normal. means it *does* exist. - this.cache[this._makeAbs(f)] = 'FILE' - break - - case 'ENOENT': // not terribly unusual - case 'ELOOP': - case 'ENAMETOOLONG': - case 'UNKNOWN': - this.cache[this._makeAbs(f)] = false - break - - default: // some unusual error. Treat as failure. 
- this.cache[this._makeAbs(f)] = false - if (this.strict) { - this.emit('error', er) - // If the error is handled, then we abort - // if not, we threw out of here - this.abort() - } - if (!this.silent) - console.error('glob error', er) - break - } - - return cb() -} - -Glob.prototype._processGlobStar = function (prefix, read, abs, remain, index, inGlobStar, cb) { - var self = this - this._readdir(abs, inGlobStar, function (er, entries) { - self._processGlobStar2(prefix, read, abs, remain, index, inGlobStar, entries, cb) - }) -} - - -Glob.prototype._processGlobStar2 = function (prefix, read, abs, remain, index, inGlobStar, entries, cb) { - //console.error('pgs2', prefix, remain[0], entries) - - // no entries means not a dir, so it can never have matches - // foo.txt/** doesn't match foo.txt - if (!entries) - return cb() - - // test without the globstar, and with every child both below - // and replacing the globstar. - var remainWithoutGlobStar = remain.slice(1) - var gspref = prefix ? [ prefix ] : [] - var noGlobStar = gspref.concat(remainWithoutGlobStar) - - // the noGlobStar pattern exits the inGlobStar state - this._process(noGlobStar, index, false, cb) - - var isSym = this.symlinks[abs] - var len = entries.length - - // If it's a symlink, and we're in a globstar, then stop - if (isSym && inGlobStar) - return cb() - - for (var i = 0; i < len; i++) { - var e = entries[i] - if (e.charAt(0) === '.' && !this.dot) - continue - - // these two cases enter the inGlobStar state - var instead = gspref.concat(entries[i], remainWithoutGlobStar) - this._process(instead, index, true, cb) - - var below = gspref.concat(entries[i], remain) - this._process(below, index, true, cb) - } - - cb() -} - -Glob.prototype._processSimple = function (prefix, index, cb) { - // XXX review this. Shouldn't it be doing the mounting etc - // before doing stat? kinda weird? 
- var self = this - this._stat(prefix, function (er, exists) { - self._processSimple2(prefix, index, er, exists, cb) - }) -} -Glob.prototype._processSimple2 = function (prefix, index, er, exists, cb) { - - //console.error('ps2', prefix, exists) - - if (!this.matches[index]) - this.matches[index] = Object.create(null) - - // If it doesn't exist, then just mark the lack of results - if (!exists) - return cb() - - if (prefix && isAbsolute(prefix) && !this.nomount) { - var trail = /[\/\\]$/.test(prefix) - if (prefix.charAt(0) === '/') { - prefix = path.join(this.root, prefix) - } else { - prefix = path.resolve(this.root, prefix) - if (trail) - prefix += '/' - } - } - - if (process.platform === 'win32') - prefix = prefix.replace(/\\/g, '/') - - // Mark this as a match - this._emitMatch(index, prefix) - cb() -} - -// Returns either 'DIR', 'FILE', or false -Glob.prototype._stat = function (f, cb) { - var abs = this._makeAbs(f) - var needDir = f.slice(-1) === '/' - - if (f.length > this.maxLength) - return cb() - - if (!this.stat && ownProp(this.cache, abs)) { - var c = this.cache[abs] - - if (Array.isArray(c)) - c = 'DIR' - - // It exists, but maybe not how we need it - if (!needDir || c === 'DIR') - return cb(null, c) - - if (needDir && c === 'FILE') - return cb() - - // otherwise we have to stat, because maybe c=true - // if we know it exists, but not what it is. - } - - var exists - var stat = this.statCache[abs] - if (stat !== undefined) { - if (stat === false) - return cb(null, stat) - else { - var type = stat.isDirectory() ? 'DIR' : 'FILE' - if (needDir && type === 'FILE') - return cb() - else - return cb(null, type, stat) - } - } - - var self = this - var statcb = inflight('stat\0' + abs, lstatcb_) - if (statcb) - fs.lstat(abs, statcb) - - function lstatcb_ (er, lstat) { - if (lstat && lstat.isSymbolicLink()) { - // If it's a symlink, then treat it as the target, unless - // the target does not exist, then treat it as a file. 
- return fs.stat(abs, function (er, stat) { - if (er) - self._stat2(f, abs, null, lstat, cb) - else - self._stat2(f, abs, er, stat, cb) - }) - } else { - self._stat2(f, abs, er, lstat, cb) - } - } -} - -Glob.prototype._stat2 = function (f, abs, er, stat, cb) { - if (er) { - this.statCache[abs] = false - return cb() - } - - var needDir = f.slice(-1) === '/' - this.statCache[abs] = stat - - if (abs.slice(-1) === '/' && !stat.isDirectory()) - return cb(null, false, stat) - - var c = stat.isDirectory() ? 'DIR' : 'FILE' - this.cache[abs] = this.cache[abs] || c - - if (needDir && c !== 'DIR') - return cb() - - return cb(null, c, stat) -} - -}).call(this,require('_process')) -},{"./common.js":15,"./sync.js":17,"_process":24,"assert":9,"events":14,"fs":12,"inflight":18,"inherits":19,"minimatch":20,"once":21,"path":22,"path-is-absolute":23,"util":28}],17:[function(require,module,exports){ -(function (process){ -module.exports = globSync -globSync.GlobSync = GlobSync - -var fs = require('fs') -var minimatch = require('minimatch') -var Minimatch = minimatch.Minimatch -var Glob = require('./glob.js').Glob -var util = require('util') -var path = require('path') -var assert = require('assert') -var isAbsolute = require('path-is-absolute') -var common = require('./common.js') -var alphasort = common.alphasort -var alphasorti = common.alphasorti -var setopts = common.setopts -var ownProp = common.ownProp -var childrenIgnored = common.childrenIgnored - -function globSync (pattern, options) { - if (typeof options === 'function' || arguments.length === 3) - throw new TypeError('callback provided to sync glob\n'+ - 'See: https://github.com/isaacs/node-glob/issues/167') - - return new GlobSync(pattern, options).found -} - -function GlobSync (pattern, options) { - if (!pattern) - throw new Error('must provide pattern') - - if (typeof options === 'function' || arguments.length === 3) - throw new TypeError('callback provided to sync glob\n'+ - 'See: 
https://github.com/isaacs/node-glob/issues/167') - - if (!(this instanceof GlobSync)) - return new GlobSync(pattern, options) - - setopts(this, pattern, options) - - if (this.noprocess) - return this - - var n = this.minimatch.set.length - this.matches = new Array(n) - for (var i = 0; i < n; i ++) { - this._process(this.minimatch.set[i], i, false) - } - this._finish() -} - -GlobSync.prototype._finish = function () { - assert(this instanceof GlobSync) - if (this.realpath) { - var self = this - this.matches.forEach(function (matchset, index) { - var set = self.matches[index] = Object.create(null) - for (var p in matchset) { - try { - p = self._makeAbs(p) - var real = fs.realpathSync(p, self.realpathCache) - set[real] = true - } catch (er) { - if (er.syscall === 'stat') - set[self._makeAbs(p)] = true - else - throw er - } - } - }) - } - common.finish(this) -} - - -GlobSync.prototype._process = function (pattern, index, inGlobStar) { - assert(this instanceof GlobSync) - - // Get the first [n] parts of pattern that are all strings. - var n = 0 - while (typeof pattern[n] === 'string') { - n ++ - } - // now n is the index of the first one that is *not* a string. - - // See if there's anything else - var prefix - switch (n) { - // if not, then this is rather simple - case pattern.length: - this._processSimple(pattern.join('/'), index) - return - - case 0: - // pattern *starts* with some non-trivial item. - // going to readdir(cwd), but not include the prefix in matches. - prefix = null - break - - default: - // pattern has some string bits in the front. - // whatever it starts with, whether that's 'absolute' like /foo/bar, - // or 'relative' like '../baz' - prefix = pattern.slice(0, n).join('/') - break - } - - var remain = pattern.slice(n) - - // get the list of entries. - var read - if (prefix === null) - read = '.' 
- else if (isAbsolute(prefix) || isAbsolute(pattern.join('/'))) { - if (!prefix || !isAbsolute(prefix)) - prefix = '/' + prefix - read = prefix - } else - read = prefix - - var abs = this._makeAbs(read) - - //if ignored, skip processing - if (childrenIgnored(this, read)) - return - - var isGlobStar = remain[0] === minimatch.GLOBSTAR - if (isGlobStar) - this._processGlobStar(prefix, read, abs, remain, index, inGlobStar) - else - this._processReaddir(prefix, read, abs, remain, index, inGlobStar) -} - - -GlobSync.prototype._processReaddir = function (prefix, read, abs, remain, index, inGlobStar) { - var entries = this._readdir(abs, inGlobStar) - - // if the abs isn't a dir, then nothing can match! - if (!entries) - return - - // It will only match dot entries if it starts with a dot, or if - // dot is set. Stuff like @(.foo|.bar) isn't allowed. - var pn = remain[0] - var negate = !!this.minimatch.negate - var rawGlob = pn._glob - var dotOk = this.dot || rawGlob.charAt(0) === '.' - - var matchedEntries = [] - for (var i = 0; i < entries.length; i++) { - var e = entries[i] - if (e.charAt(0) !== '.' || dotOk) { - var m - if (negate && !prefix) { - m = !e.match(pn) - } else { - m = e.match(pn) - } - if (m) - matchedEntries.push(e) - } - } - - var len = matchedEntries.length - // If there are no matched entries, then nothing matches. - if (len === 0) - return - - // if this is the last remaining pattern bit, then no need for - // an additional stat *unless* the user has specified mark or - // stat explicitly. We know they exist, since readdir returned - // them. 
- - if (remain.length === 1 && !this.mark && !this.stat) { - if (!this.matches[index]) - this.matches[index] = Object.create(null) - - for (var i = 0; i < len; i ++) { - var e = matchedEntries[i] - if (prefix) { - if (prefix.slice(-1) !== '/') - e = prefix + '/' + e - else - e = prefix + e - } - - if (e.charAt(0) === '/' && !this.nomount) { - e = path.join(this.root, e) - } - this.matches[index][e] = true - } - // This was the last one, and no stats were needed - return - } - - // now test all matched entries as stand-ins for that part - // of the pattern. - remain.shift() - for (var i = 0; i < len; i ++) { - var e = matchedEntries[i] - var newPattern - if (prefix) - newPattern = [prefix, e] - else - newPattern = [e] - this._process(newPattern.concat(remain), index, inGlobStar) - } -} - - -GlobSync.prototype._emitMatch = function (index, e) { - var abs = this._makeAbs(e) - if (this.mark) - e = this._mark(e) - - if (this.matches[index][e]) - return - - if (this.nodir) { - var c = this.cache[this._makeAbs(e)] - if (c === 'DIR' || Array.isArray(c)) - return - } - - this.matches[index][e] = true - if (this.stat) - this._stat(e) -} - - -GlobSync.prototype._readdirInGlobStar = function (abs) { - // follow all symlinked directories forever - // just proceed as if this is a non-globstar situation - if (this.follow) - return this._readdir(abs, false) - - var entries - var lstat - var stat - try { - lstat = fs.lstatSync(abs) - } catch (er) { - // lstat failed, doesn't exist - return null - } - - var isSym = lstat.isSymbolicLink() - this.symlinks[abs] = isSym - - // If it's not a symlink or a dir, then it's definitely a regular file. - // don't bother doing a readdir in that case. 
- if (!isSym && !lstat.isDirectory()) - this.cache[abs] = 'FILE' - else - entries = this._readdir(abs, false) - - return entries -} - -GlobSync.prototype._readdir = function (abs, inGlobStar) { - var entries - - if (inGlobStar && !ownProp(this.symlinks, abs)) - return this._readdirInGlobStar(abs) - - if (ownProp(this.cache, abs)) { - var c = this.cache[abs] - if (!c || c === 'FILE') - return null - - if (Array.isArray(c)) - return c - } - - try { - return this._readdirEntries(abs, fs.readdirSync(abs)) - } catch (er) { - this._readdirError(abs, er) - return null - } -} - -GlobSync.prototype._readdirEntries = function (abs, entries) { - // if we haven't asked to stat everything, then just - // assume that everything in there exists, so we can avoid - // having to stat it a second time. - if (!this.mark && !this.stat) { - for (var i = 0; i < entries.length; i ++) { - var e = entries[i] - if (abs === '/') - e = abs + e - else - e = abs + '/' + e - this.cache[e] = true - } - } - - this.cache[abs] = entries - - // mark and cache dir-ness - return entries -} - -GlobSync.prototype._readdirError = function (f, er) { - // handle errors, and cache the information - switch (er.code) { - case 'ENOTSUP': // https://github.com/isaacs/node-glob/issues/205 - case 'ENOTDIR': // totally normal. means it *does* exist. - this.cache[this._makeAbs(f)] = 'FILE' - break - - case 'ENOENT': // not terribly unusual - case 'ELOOP': - case 'ENAMETOOLONG': - case 'UNKNOWN': - this.cache[this._makeAbs(f)] = false - break - - default: // some unusual error. Treat as failure. 
- this.cache[this._makeAbs(f)] = false - if (this.strict) - throw er - if (!this.silent) - console.error('glob error', er) - break - } -} - -GlobSync.prototype._processGlobStar = function (prefix, read, abs, remain, index, inGlobStar) { - - var entries = this._readdir(abs, inGlobStar) - - // no entries means not a dir, so it can never have matches - // foo.txt/** doesn't match foo.txt - if (!entries) - return - - // test without the globstar, and with every child both below - // and replacing the globstar. - var remainWithoutGlobStar = remain.slice(1) - var gspref = prefix ? [ prefix ] : [] - var noGlobStar = gspref.concat(remainWithoutGlobStar) - - // the noGlobStar pattern exits the inGlobStar state - this._process(noGlobStar, index, false) - - var len = entries.length - var isSym = this.symlinks[abs] - - // If it's a symlink, and we're in a globstar, then stop - if (isSym && inGlobStar) - return - - for (var i = 0; i < len; i++) { - var e = entries[i] - if (e.charAt(0) === '.' && !this.dot) - continue - - // these two cases enter the inGlobStar state - var instead = gspref.concat(entries[i], remainWithoutGlobStar) - this._process(instead, index, true) - - var below = gspref.concat(entries[i], remain) - this._process(below, index, true) - } -} - -GlobSync.prototype._processSimple = function (prefix, index) { - // XXX review this. Shouldn't it be doing the mounting etc - // before doing stat? kinda weird? 
- var exists = this._stat(prefix) - - if (!this.matches[index]) - this.matches[index] = Object.create(null) - - // If it doesn't exist, then just mark the lack of results - if (!exists) - return - - if (prefix && isAbsolute(prefix) && !this.nomount) { - var trail = /[\/\\]$/.test(prefix) - if (prefix.charAt(0) === '/') { - prefix = path.join(this.root, prefix) - } else { - prefix = path.resolve(this.root, prefix) - if (trail) - prefix += '/' - } - } - - if (process.platform === 'win32') - prefix = prefix.replace(/\\/g, '/') - - // Mark this as a match - this.matches[index][prefix] = true -} - -// Returns either 'DIR', 'FILE', or false -GlobSync.prototype._stat = function (f) { - var abs = this._makeAbs(f) - var needDir = f.slice(-1) === '/' - - if (f.length > this.maxLength) - return false - - if (!this.stat && ownProp(this.cache, abs)) { - var c = this.cache[abs] - - if (Array.isArray(c)) - c = 'DIR' - - // It exists, but maybe not how we need it - if (!needDir || c === 'DIR') - return c - - if (needDir && c === 'FILE') - return false - - // otherwise we have to stat, because maybe c=true - // if we know it exists, but not what it is. - } - - var exists - var stat = this.statCache[abs] - if (!stat) { - var lstat - try { - lstat = fs.lstatSync(abs) - } catch (er) { - return false - } - - if (lstat.isSymbolicLink()) { - try { - stat = fs.statSync(abs) - } catch (er) { - stat = lstat - } - } else { - stat = lstat - } - } - - this.statCache[abs] = stat - - var c = stat.isDirectory() ? 
'DIR' : 'FILE' - this.cache[abs] = this.cache[abs] || c - - if (needDir && c !== 'DIR') - return false - - return c -} - -GlobSync.prototype._mark = function (p) { - return common.mark(this, p) -} - -GlobSync.prototype._makeAbs = function (f) { - return common.makeAbs(this, f) -} - -}).call(this,require('_process')) -},{"./common.js":15,"./glob.js":16,"_process":24,"assert":9,"fs":12,"minimatch":20,"path":22,"path-is-absolute":23,"util":28}],18:[function(require,module,exports){ -(function (process){ -var wrappy = require('wrappy') -var reqs = Object.create(null) -var once = require('once') - -module.exports = wrappy(inflight) - -function inflight (key, cb) { - if (reqs[key]) { - reqs[key].push(cb) - return null - } else { - reqs[key] = [cb] - return makeres(key) - } -} - -function makeres (key) { - return once(function RES () { - var cbs = reqs[key] - var len = cbs.length - var args = slice(arguments) - - // XXX It's somewhat ambiguous whether a new callback added in this - // pass should be queued for later execution if something in the - // list of callbacks throws, or if it should just be discarded. - // However, it's such an edge case that it hardly matters, and either - // choice is likely as surprising as the other. - // As it happens, we do go ahead and schedule it for later execution. - try { - for (var i = 0; i < len; i++) { - cbs[i].apply(null, args) - } - } finally { - if (cbs.length > len) { - // added more in the interim. - // de-zalgo, just in case, but don't call again. 
- cbs.splice(0, len) - process.nextTick(function () { - RES.apply(null, args) - }) - } else { - delete reqs[key] - } - } - }) -} - -function slice (args) { - var length = args.length - var array = [] - - for (var i = 0; i < length; i++) array[i] = args[i] - return array -} - -}).call(this,require('_process')) -},{"_process":24,"once":21,"wrappy":29}],19:[function(require,module,exports){ -if (typeof Object.create === 'function') { - // implementation from standard node.js 'util' module - module.exports = function inherits(ctor, superCtor) { - ctor.super_ = superCtor - ctor.prototype = Object.create(superCtor.prototype, { - constructor: { - value: ctor, - enumerable: false, - writable: true, - configurable: true - } - }); - }; -} else { - // old school shim for old browsers - module.exports = function inherits(ctor, superCtor) { - ctor.super_ = superCtor - var TempCtor = function () {} - TempCtor.prototype = superCtor.prototype - ctor.prototype = new TempCtor() - ctor.prototype.constructor = ctor - } -} - -},{}],20:[function(require,module,exports){ -module.exports = minimatch -minimatch.Minimatch = Minimatch - -var path = { sep: '/' } -try { - path = require('path') -} catch (er) {} - -var GLOBSTAR = minimatch.GLOBSTAR = Minimatch.GLOBSTAR = {} -var expand = require('brace-expansion') - -var plTypes = { - '!': { open: '(?:(?!(?:', close: '))[^/]*?)'}, - '?': { open: '(?:', close: ')?' }, - '+': { open: '(?:', close: ')+' }, - '*': { open: '(?:', close: ')*' }, - '@': { open: '(?:', close: ')' } -} - -// any single thing other than / -// don't need to escape / when using new RegExp() -var qmark = '[^/]' - -// * => any number of characters -var star = qmark + '*?' - -// ** when dots are allowed. Anything goes, except .. and . -// not (^ or / followed by one or two dots followed by $ or /), -// followed by anything, any number of times. -var twoStarDot = '(?:(?!(?:\\\/|^)(?:\\.{1,2})($|\\\/)).)*?' 
- -// not a ^ or / followed by a dot, -// followed by anything, any number of times. -var twoStarNoDot = '(?:(?!(?:\\\/|^)\\.).)*?' - -// characters that need to be escaped in RegExp. -var reSpecials = charSet('().*{}+?[]^$\\!') - -// "abc" -> { a:true, b:true, c:true } -function charSet (s) { - return s.split('').reduce(function (set, c) { - set[c] = true - return set - }, {}) -} - -// normalizes slashes. -var slashSplit = /\/+/ - -minimatch.filter = filter -function filter (pattern, options) { - options = options || {} - return function (p, i, list) { - return minimatch(p, pattern, options) - } -} - -function ext (a, b) { - a = a || {} - b = b || {} - var t = {} - Object.keys(b).forEach(function (k) { - t[k] = b[k] - }) - Object.keys(a).forEach(function (k) { - t[k] = a[k] - }) - return t -} - -minimatch.defaults = function (def) { - if (!def || !Object.keys(def).length) return minimatch - - var orig = minimatch - - var m = function minimatch (p, pattern, options) { - return orig.minimatch(p, pattern, ext(def, options)) - } - - m.Minimatch = function Minimatch (pattern, options) { - return new orig.Minimatch(pattern, ext(def, options)) - } - - return m -} - -Minimatch.defaults = function (def) { - if (!def || !Object.keys(def).length) return Minimatch - return minimatch.defaults(def).Minimatch -} - -function minimatch (p, pattern, options) { - if (typeof pattern !== 'string') { - throw new TypeError('glob pattern string required') - } - - if (!options) options = {} - - // shortcut: comments match nothing. 
- if (!options.nocomment && pattern.charAt(0) === '#') { - return false - } - - // "" only matches "" - if (pattern.trim() === '') return p === '' - - return new Minimatch(pattern, options).match(p) -} - -function Minimatch (pattern, options) { - if (!(this instanceof Minimatch)) { - return new Minimatch(pattern, options) - } - - if (typeof pattern !== 'string') { - throw new TypeError('glob pattern string required') - } - - if (!options) options = {} - pattern = pattern.trim() - - // windows support: need to use /, not \ - if (path.sep !== '/') { - pattern = pattern.split(path.sep).join('/') - } - - this.options = options - this.set = [] - this.pattern = pattern - this.regexp = null - this.negate = false - this.comment = false - this.empty = false - - // make the set of regexps etc. - this.make() -} - -Minimatch.prototype.debug = function () {} - -Minimatch.prototype.make = make -function make () { - // don't do it more than once. - if (this._made) return - - var pattern = this.pattern - var options = this.options - - // empty patterns and comments match nothing. - if (!options.nocomment && pattern.charAt(0) === '#') { - this.comment = true - return - } - if (!pattern) { - this.empty = true - return - } - - // step 1: figure out negation, etc. - this.parseNegate() - - // step 2: expand braces - var set = this.globSet = this.braceExpand() - - if (options.debug) this.debug = console.error - - this.debug(this.pattern, set) - - // step 3: now we have a set, so turn each one into a series of path-portion - // matching patterns. 
- // These will be regexps, except in the case of "**", which is - // set to the GLOBSTAR object for globstar behavior, - // and will not contain any / characters - set = this.globParts = set.map(function (s) { - return s.split(slashSplit) - }) - - this.debug(this.pattern, set) - - // glob --> regexps - set = set.map(function (s, si, set) { - return s.map(this.parse, this) - }, this) - - this.debug(this.pattern, set) - - // filter out everything that didn't compile properly. - set = set.filter(function (s) { - return s.indexOf(false) === -1 - }) - - this.debug(this.pattern, set) - - this.set = set -} - -Minimatch.prototype.parseNegate = parseNegate -function parseNegate () { - var pattern = this.pattern - var negate = false - var options = this.options - var negateOffset = 0 - - if (options.nonegate) return - - for (var i = 0, l = pattern.length - ; i < l && pattern.charAt(i) === '!' - ; i++) { - negate = !negate - negateOffset++ - } - - if (negateOffset) this.pattern = pattern.substr(negateOffset) - this.negate = negate -} - -// Brace expansion: -// a{b,c}d -> abd acd -// a{b,}c -> abc ac -// a{0..3}d -> a0d a1d a2d a3d -// a{b,c{d,e}f}g -> abg acdfg acefg -// a{b,c}d{e,f}g -> abdeg acdeg abdeg abdfg -// -// Invalid sets are not expanded. -// a{2..}b -> a{2..}b -// a{b}c -> a{b}c -minimatch.braceExpand = function (pattern, options) { - return braceExpand(pattern, options) -} - -Minimatch.prototype.braceExpand = braceExpand - -function braceExpand (pattern, options) { - if (!options) { - if (this instanceof Minimatch) { - options = this.options - } else { - options = {} - } - } - - pattern = typeof pattern === 'undefined' - ? this.pattern : pattern - - if (typeof pattern === 'undefined') { - throw new TypeError('undefined pattern') - } - - if (options.nobrace || - !pattern.match(/\{.*\}/)) { - // shortcut. no need to expand. - return [pattern] - } - - return expand(pattern) -} - -// parse a component of the expanded set. 
-// At this point, no pattern may contain "/" in it -// so we're going to return a 2d array, where each entry is the full -// pattern, split on '/', and then turned into a regular expression. -// A regexp is made at the end which joins each array with an -// escaped /, and another full one which joins each regexp with |. -// -// Following the lead of Bash 4.1, note that "**" only has special meaning -// when it is the *only* thing in a path portion. Otherwise, any series -// of * is equivalent to a single *. Globstar behavior is enabled by -// default, and can be disabled by setting options.noglobstar. -Minimatch.prototype.parse = parse -var SUBPARSE = {} -function parse (pattern, isSub) { - if (pattern.length > 1024 * 64) { - throw new TypeError('pattern is too long') - } - - var options = this.options - - // shortcuts - if (!options.noglobstar && pattern === '**') return GLOBSTAR - if (pattern === '') return '' - - var re = '' - var hasMagic = !!options.nocase - var escaping = false - // ? => one single character - var patternListStack = [] - var negativeLists = [] - var stateChar - var inClass = false - var reClassStart = -1 - var classStart = -1 - // . and .. never match anything that doesn't start with ., - // even when options.dot is set. - var patternStart = pattern.charAt(0) === '.' ? '' // anything - // not (start or / followed by . or .. followed by / or end) - : options.dot ? '(?!(?:^|\\\/)\\.{1,2}(?:$|\\\/))' - : '(?!\\.)' - var self = this - - function clearStateChar () { - if (stateChar) { - // we had some state-tracking character - // that wasn't consumed by this pass. 
- switch (stateChar) { - case '*': - re += star - hasMagic = true - break - case '?': - re += qmark - hasMagic = true - break - default: - re += '\\' + stateChar - break - } - self.debug('clearStateChar %j %j', stateChar, re) - stateChar = false - } - } - - for (var i = 0, len = pattern.length, c - ; (i < len) && (c = pattern.charAt(i)) - ; i++) { - this.debug('%s\t%s %s %j', pattern, i, re, c) - - // skip over any that are escaped. - if (escaping && reSpecials[c]) { - re += '\\' + c - escaping = false - continue - } - - switch (c) { - case '/': - // completely not allowed, even escaped. - // Should already be path-split by now. - return false - - case '\\': - clearStateChar() - escaping = true - continue - - // the various stateChar values - // for the "extglob" stuff. - case '?': - case '*': - case '+': - case '@': - case '!': - this.debug('%s\t%s %s %j <-- stateChar', pattern, i, re, c) - - // all of those are literals inside a class, except that - // the glob [!a] means [^a] in regexp - if (inClass) { - this.debug(' in class') - if (c === '!' && i === classStart + 1) c = '^' - re += c - continue - } - - // if we already have a stateChar, then it means - // that there was something like ** or +? in there. - // Handle the stateChar, then proceed with this one. - self.debug('call clearStateChar %j', stateChar) - clearStateChar() - stateChar = c - // if extglob is disabled, then +(asdf|foo) isn't a thing. - // just clear the statechar *now*, rather than even diving into - // the patternList stuff. - if (options.noext) clearStateChar() - continue - - case '(': - if (inClass) { - re += '(' - continue - } - - if (!stateChar) { - re += '\\(' - continue - } - - patternListStack.push({ - type: stateChar, - start: i - 1, - reStart: re.length, - open: plTypes[stateChar].open, - close: plTypes[stateChar].close - }) - // negation is (?:(?!js)[^/]*) - re += stateChar === '!' ? 
'(?:(?!(?:' : '(?:' - this.debug('plType %j %j', stateChar, re) - stateChar = false - continue - - case ')': - if (inClass || !patternListStack.length) { - re += '\\)' - continue - } - - clearStateChar() - hasMagic = true - var pl = patternListStack.pop() - // negation is (?:(?!js)[^/]*) - // The others are (?:) - re += pl.close - if (pl.type === '!') { - negativeLists.push(pl) - } - pl.reEnd = re.length - continue - - case '|': - if (inClass || !patternListStack.length || escaping) { - re += '\\|' - escaping = false - continue - } - - clearStateChar() - re += '|' - continue - - // these are mostly the same in regexp and glob - case '[': - // swallow any state-tracking char before the [ - clearStateChar() - - if (inClass) { - re += '\\' + c - continue - } - - inClass = true - classStart = i - reClassStart = re.length - re += c - continue - - case ']': - // a right bracket shall lose its special - // meaning and represent itself in - // a bracket expression if it occurs - // first in the list. -- POSIX.2 2.8.3.2 - if (i === classStart + 1 || !inClass) { - re += '\\' + c - escaping = false - continue - } - - // handle the case where we left a class open. - // "[z-a]" is valid, equivalent to "\[z-a\]" - if (inClass) { - // split where the last [ was, make sure we don't have - // an invalid re. if so, re-walk the contents of the - // would-be class to re-translate any characters that - // were passed through as-is - // TODO: It would probably be faster to determine this - // without a try/catch and a new RegExp, but it's tricky - // to do safely. For now, this is safe and works. - var cs = pattern.substring(classStart + 1, i) - try { - RegExp('[' + cs + ']') - } catch (er) { - // not a valid class! - var sp = this.parse(cs, SUBPARSE) - re = re.substr(0, reClassStart) + '\\[' + sp[0] + '\\]' - hasMagic = hasMagic || sp[1] - inClass = false - continue - } - } - - // finish up the class. 
- hasMagic = true - inClass = false - re += c - continue - - default: - // swallow any state char that wasn't consumed - clearStateChar() - - if (escaping) { - // no need - escaping = false - } else if (reSpecials[c] - && !(c === '^' && inClass)) { - re += '\\' - } - - re += c - - } // switch - } // for - - // handle the case where we left a class open. - // "[abc" is valid, equivalent to "\[abc" - if (inClass) { - // split where the last [ was, and escape it - // this is a huge pita. We now have to re-walk - // the contents of the would-be class to re-translate - // any characters that were passed through as-is - cs = pattern.substr(classStart + 1) - sp = this.parse(cs, SUBPARSE) - re = re.substr(0, reClassStart) + '\\[' + sp[0] - hasMagic = hasMagic || sp[1] - } - - // handle the case where we had a +( thing at the *end* - // of the pattern. - // each pattern list stack adds 3 chars, and we need to go through - // and escape any | chars that were passed through as-is for the regexp. - // Go through and escape them, taking care not to double-escape any - // | chars that were already escaped. - for (pl = patternListStack.pop(); pl; pl = patternListStack.pop()) { - var tail = re.slice(pl.reStart + pl.open.length) - this.debug('setting tail', re, pl) - // maybe some even number of \, then maybe 1 \, followed by a | - tail = tail.replace(/((?:\\{2}){0,64})(\\?)\|/g, function (_, $1, $2) { - if (!$2) { - // the | isn't already escaped, so escape it. - $2 = '\\' - } - - // need to escape all those slashes *again*, without escaping the - // one that we need for escaping the | character. As it works out, - // escaping an even number of slashes can be done by simply repeating - // it exactly after itself. That's why this trick works. - // - // I am sorry that you have to see this. - return $1 + $1 + $2 + '|' - }) - - this.debug('tail=%j\n %s', tail, tail, pl, re) - var t = pl.type === '*' ? star - : pl.type === '?' ? 
qmark - : '\\' + pl.type - - hasMagic = true - re = re.slice(0, pl.reStart) + t + '\\(' + tail - } - - // handle trailing things that only matter at the very end. - clearStateChar() - if (escaping) { - // trailing \\ - re += '\\\\' - } - - // only need to apply the nodot start if the re starts with - // something that could conceivably capture a dot - var addPatternStart = false - switch (re.charAt(0)) { - case '.': - case '[': - case '(': addPatternStart = true - } - - // Hack to work around lack of negative lookbehind in JS - // A pattern like: *.!(x).!(y|z) needs to ensure that a name - // like 'a.xyz.yz' doesn't match. So, the first negative - // lookahead, has to look ALL the way ahead, to the end of - // the pattern. - for (var n = negativeLists.length - 1; n > -1; n--) { - var nl = negativeLists[n] - - var nlBefore = re.slice(0, nl.reStart) - var nlFirst = re.slice(nl.reStart, nl.reEnd - 8) - var nlLast = re.slice(nl.reEnd - 8, nl.reEnd) - var nlAfter = re.slice(nl.reEnd) - - nlLast += nlAfter - - // Handle nested stuff like *(*.js|!(*.json)), where open parens - // mean that we should *not* include the ) in the bit that is considered - // "after" the negated section. - var openParensBefore = nlBefore.split('(').length - 1 - var cleanAfter = nlAfter - for (i = 0; i < openParensBefore; i++) { - cleanAfter = cleanAfter.replace(/\)[+*?]?/, '') - } - nlAfter = cleanAfter - - var dollar = '' - if (nlAfter === '' && isSub !== SUBPARSE) { - dollar = '$' - } - var newRe = nlBefore + nlFirst + nlAfter + dollar + nlLast - re = newRe - } - - // if the re is not "" at this point, then we need to make sure - // it doesn't match against an empty path part. - // Otherwise a/* will match a/, which it should not. - if (re !== '' && hasMagic) { - re = '(?=.)' + re - } - - if (addPatternStart) { - re = patternStart + re - } - - // parsing just a piece of a larger pattern. 
- if (isSub === SUBPARSE) { - return [re, hasMagic] - } - - // skip the regexp for non-magical patterns - // unescape anything in it, though, so that it'll be - // an exact match against a file etc. - if (!hasMagic) { - return globUnescape(pattern) - } - - var flags = options.nocase ? 'i' : '' - try { - var regExp = new RegExp('^' + re + '$', flags) - } catch (er) { - // If it was an invalid regular expression, then it can't match - // anything. This trick looks for a character after the end of - // the string, which is of course impossible, except in multi-line - // mode, but it's not a /m regex. - return new RegExp('$.') - } - - regExp._glob = pattern - regExp._src = re - - return regExp -} - -minimatch.makeRe = function (pattern, options) { - return new Minimatch(pattern, options || {}).makeRe() -} - -Minimatch.prototype.makeRe = makeRe -function makeRe () { - if (this.regexp || this.regexp === false) return this.regexp - - // at this point, this.set is a 2d array of partial - // pattern strings, or "**". - // - // It's better to use .match(). This function shouldn't - // be used, really, but it's pretty convenient sometimes, - // when you just want to work with a regex. - var set = this.set - - if (!set.length) { - this.regexp = false - return this.regexp - } - var options = this.options - - var twoStar = options.noglobstar ? star - : options.dot ? twoStarDot - : twoStarNoDot - var flags = options.nocase ? 'i' : '' - - var re = set.map(function (pattern) { - return pattern.map(function (p) { - return (p === GLOBSTAR) ? twoStar - : (typeof p === 'string') ? regExpEscape(p) - : p._src - }).join('\\\/') - }).join('|') - - // must match entire pattern - // ending in a * or ** will make it less strict. - re = '^(?:' + re + ')$' - - // can match anything, as long as it's not this. - if (this.negate) re = '^(?!' 
+ re + ').*$' - - try { - this.regexp = new RegExp(re, flags) - } catch (ex) { - this.regexp = false - } - return this.regexp -} - -minimatch.match = function (list, pattern, options) { - options = options || {} - var mm = new Minimatch(pattern, options) - list = list.filter(function (f) { - return mm.match(f) - }) - if (mm.options.nonull && !list.length) { - list.push(pattern) - } - return list -} - -Minimatch.prototype.match = match -function match (f, partial) { - this.debug('match', f, this.pattern) - // short-circuit in the case of busted things. - // comments, etc. - if (this.comment) return false - if (this.empty) return f === '' - - if (f === '/' && partial) return true - - var options = this.options - - // windows: need to use /, not \ - if (path.sep !== '/') { - f = f.split(path.sep).join('/') - } - - // treat the test path as a set of pathparts. - f = f.split(slashSplit) - this.debug(this.pattern, 'split', f) - - // just ONE of the pattern sets in this.set needs to match - // in order for it to be valid. If negating, then just one - // match means that we have failed. - // Either way, return on the first hit. - - var set = this.set - this.debug(this.pattern, 'set', set) - - // Find the basename of the path by looking for the last non-empty segment - var filename - var i - for (i = f.length - 1; i >= 0; i--) { - filename = f[i] - if (filename) break - } - - for (i = 0; i < set.length; i++) { - var pattern = set[i] - var file = f - if (options.matchBase && pattern.length === 1) { - file = [filename] - } - var hit = this.matchOne(file, pattern, partial) - if (hit) { - if (options.flipNegate) return true - return !this.negate - } - } - - // didn't get any hits. this is success if it's a negative - // pattern, failure otherwise. 
- if (options.flipNegate) return false - return this.negate -} - -// set partial to true to test if, for example, -// "/a/b" matches the start of "/*/b/*/d" -// Partial means, if you run out of file before you run -// out of pattern, then that's fine, as long as all -// the parts match. -Minimatch.prototype.matchOne = function (file, pattern, partial) { - var options = this.options - - this.debug('matchOne', - { 'this': this, file: file, pattern: pattern }) - - this.debug('matchOne', file.length, pattern.length) - - for (var fi = 0, - pi = 0, - fl = file.length, - pl = pattern.length - ; (fi < fl) && (pi < pl) - ; fi++, pi++) { - this.debug('matchOne loop') - var p = pattern[pi] - var f = file[fi] - - this.debug(pattern, p, f) - - // should be impossible. - // some invalid regexp stuff in the set. - if (p === false) return false - - if (p === GLOBSTAR) { - this.debug('GLOBSTAR', [pattern, p, f]) - - // "**" - // a/**/b/**/c would match the following: - // a/b/x/y/z/c - // a/x/y/z/b/c - // a/b/x/b/x/c - // a/b/c - // To do this, take the rest of the pattern after - // the **, and see if it would match the file remainder. - // If so, return success. - // If not, the ** "swallows" a segment, and try again. - // This is recursively awful. - // - // a/**/b/**/c matching a/b/x/y/z/c - // - a matches a - // - doublestar - // - matchOne(b/x/y/z/c, b/**/c) - // - b matches b - // - doublestar - // - matchOne(x/y/z/c, c) -> no - // - matchOne(y/z/c, c) -> no - // - matchOne(z/c, c) -> no - // - matchOne(c, c) yes, hit - var fr = fi - var pr = pi + 1 - if (pr === pl) { - this.debug('** at the end') - // a ** at the end will just swallow the rest. - // We have found a match. - // however, it will not swallow /.x, unless - // options.dot is set. - // . and .. are *never* matched by **, for explosively - // exponential reasons. - for (; fi < fl; fi++) { - if (file[fi] === '.' || file[fi] === '..' 
|| - (!options.dot && file[fi].charAt(0) === '.')) return false - } - return true - } - - // ok, let's see if we can swallow whatever we can. - while (fr < fl) { - var swallowee = file[fr] - - this.debug('\nglobstar while', file, fr, pattern, pr, swallowee) - - // XXX remove this slice. Just pass the start index. - if (this.matchOne(file.slice(fr), pattern.slice(pr), partial)) { - this.debug('globstar found match!', fr, fl, swallowee) - // found a match. - return true - } else { - // can't swallow "." or ".." ever. - // can only swallow ".foo" when explicitly asked. - if (swallowee === '.' || swallowee === '..' || - (!options.dot && swallowee.charAt(0) === '.')) { - this.debug('dot detected!', file, fr, pattern, pr) - break - } - - // ** swallows a segment, and continue. - this.debug('globstar swallow a segment, and continue') - fr++ - } - } - - // no match was found. - // However, in partial mode, we can't say this is necessarily over. - // If there's more *pattern* left, then - if (partial) { - // ran out of file - this.debug('\n>>> no match, partial?', file, fr, pattern, pr) - if (fr === fl) return true - } - return false - } - - // something other than ** - // non-magic patterns just have to match exactly - // patterns with magic have been turned into regexps. - var hit - if (typeof p === 'string') { - if (options.nocase) { - hit = f.toLowerCase() === p.toLowerCase() - } else { - hit = f === p - } - this.debug('string match', p, f, hit) - } else { - hit = f.match(p) - this.debug('pattern match', p, f, hit) - } - - if (!hit) return false - } - - // Note: ending in / means that we'll get a final "" - // at the end of the pattern. This can only match a - // corresponding "" at the end of the file. - // If the file ends in /, then it can only match a - // a pattern that ends in /, unless the pattern just - // doesn't have any more for it. But, a/b/ should *not* - // match "a/b/*", even though "" matches against the - // [^/]*? 
pattern, except in partial mode, where it might - // simply not be reached yet. - // However, a/b/ should still satisfy a/* - - // now either we fell off the end of the pattern, or we're done. - if (fi === fl && pi === pl) { - // ran out of pattern and filename at the same time. - // an exact hit! - return true - } else if (fi === fl) { - // ran out of file, but still had pattern left. - // this is ok if we're doing the match as part of - // a glob fs traversal. - return partial - } else if (pi === pl) { - // ran out of pattern, still have file left. - // this is only acceptable if we're on the very last - // empty segment of a file with a trailing slash. - // a/* should match a/b/ - var emptyFileEnd = (fi === fl - 1) && (file[fi] === '') - return emptyFileEnd - } - - // should be unreachable. - throw new Error('wtf?') -} - -// replace stuff like \* with * -function globUnescape (s) { - return s.replace(/\\(.)/g, '$1') -} - -function regExpEscape (s) { - return s.replace(/[-[\]{}()*+?.,\\^$|#\s]/g, '\\$&') -} - -},{"brace-expansion":11,"path":22}],21:[function(require,module,exports){ -var wrappy = require('wrappy') -module.exports = wrappy(once) -module.exports.strict = wrappy(onceStrict) - -once.proto = once(function () { - Object.defineProperty(Function.prototype, 'once', { - value: function () { - return once(this) - }, - configurable: true - }) - - Object.defineProperty(Function.prototype, 'onceStrict', { - value: function () { - return onceStrict(this) - }, - configurable: true - }) -}) - -function once (fn) { - var f = function () { - if (f.called) return f.value - f.called = true - return f.value = fn.apply(this, arguments) - } - f.called = false - return f -} - -function onceStrict (fn) { - var f = function () { - if (f.called) - throw new Error(f.onceError) - f.called = true - return f.value = fn.apply(this, arguments) - } - var name = fn.name || 'Function wrapped with `once`' - f.onceError = name + " shouldn't be called more than once" - f.called = false 
- return f -} - -},{"wrappy":29}],22:[function(require,module,exports){ -(function (process){ -// Copyright Joyent, Inc. and other Node contributors. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to permit -// persons to whom the Software is furnished to do so, subject to the -// following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN -// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -// USE OR OTHER DEALINGS IN THE SOFTWARE. - -// resolves . and .. 
elements in a path array with directory names there -// must be no slashes, empty elements, or device names (c:\) in the array -// (so also no leading and trailing slashes - it does not distinguish -// relative and absolute paths) -function normalizeArray(parts, allowAboveRoot) { - // if the path tries to go above the root, `up` ends up > 0 - var up = 0; - for (var i = parts.length - 1; i >= 0; i--) { - var last = parts[i]; - if (last === '.') { - parts.splice(i, 1); - } else if (last === '..') { - parts.splice(i, 1); - up++; - } else if (up) { - parts.splice(i, 1); - up--; - } - } - - // if the path is allowed to go above the root, restore leading ..s - if (allowAboveRoot) { - for (; up--; up) { - parts.unshift('..'); - } - } - - return parts; -} - -// Split a filename into [root, dir, basename, ext], unix version -// 'root' is just a slash, or nothing. -var splitPathRe = - /^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/; -var splitPath = function(filename) { - return splitPathRe.exec(filename).slice(1); -}; - -// path.resolve([from ...], to) -// posix version -exports.resolve = function() { - var resolvedPath = '', - resolvedAbsolute = false; - - for (var i = arguments.length - 1; i >= -1 && !resolvedAbsolute; i--) { - var path = (i >= 0) ? arguments[i] : process.cwd(); - - // Skip empty and invalid entries - if (typeof path !== 'string') { - throw new TypeError('Arguments to path.resolve must be strings'); - } else if (!path) { - continue; - } - - resolvedPath = path + '/' + resolvedPath; - resolvedAbsolute = path.charAt(0) === '/'; - } - - // At this point the path should be resolved to a full absolute path, but - // handle relative paths to be safe (might happen when process.cwd() fails) - - // Normalize the path - resolvedPath = normalizeArray(filter(resolvedPath.split('/'), function(p) { - return !!p; - }), !resolvedAbsolute).join('/'); - - return ((resolvedAbsolute ? 
'/' : '') + resolvedPath) || '.'; -}; - -// path.normalize(path) -// posix version -exports.normalize = function(path) { - var isAbsolute = exports.isAbsolute(path), - trailingSlash = substr(path, -1) === '/'; - - // Normalize the path - path = normalizeArray(filter(path.split('/'), function(p) { - return !!p; - }), !isAbsolute).join('/'); - - if (!path && !isAbsolute) { - path = '.'; - } - if (path && trailingSlash) { - path += '/'; - } - - return (isAbsolute ? '/' : '') + path; -}; - -// posix version -exports.isAbsolute = function(path) { - return path.charAt(0) === '/'; -}; - -// posix version -exports.join = function() { - var paths = Array.prototype.slice.call(arguments, 0); - return exports.normalize(filter(paths, function(p, index) { - if (typeof p !== 'string') { - throw new TypeError('Arguments to path.join must be strings'); - } - return p; - }).join('/')); -}; - - -// path.relative(from, to) -// posix version -exports.relative = function(from, to) { - from = exports.resolve(from).substr(1); - to = exports.resolve(to).substr(1); - - function trim(arr) { - var start = 0; - for (; start < arr.length; start++) { - if (arr[start] !== '') break; - } - - var end = arr.length - 1; - for (; end >= 0; end--) { - if (arr[end] !== '') break; - } - - if (start > end) return []; - return arr.slice(start, end - start + 1); - } - - var fromParts = trim(from.split('/')); - var toParts = trim(to.split('/')); - - var length = Math.min(fromParts.length, toParts.length); - var samePartsLength = length; - for (var i = 0; i < length; i++) { - if (fromParts[i] !== toParts[i]) { - samePartsLength = i; - break; - } - } - - var outputParts = []; - for (var i = samePartsLength; i < fromParts.length; i++) { - outputParts.push('..'); - } - - outputParts = outputParts.concat(toParts.slice(samePartsLength)); - - return outputParts.join('/'); -}; - -exports.sep = '/'; -exports.delimiter = ':'; - -exports.dirname = function(path) { - var result = splitPath(path), - root = result[0], - 
dir = result[1]; - - if (!root && !dir) { - // No dirname whatsoever - return '.'; - } - - if (dir) { - // It has a dirname, strip trailing slash - dir = dir.substr(0, dir.length - 1); - } - - return root + dir; -}; - - -exports.basename = function(path, ext) { - var f = splitPath(path)[2]; - // TODO: make this comparison case-insensitive on windows? - if (ext && f.substr(-1 * ext.length) === ext) { - f = f.substr(0, f.length - ext.length); - } - return f; -}; - - -exports.extname = function(path) { - return splitPath(path)[3]; -}; - -function filter (xs, f) { - if (xs.filter) return xs.filter(f); - var res = []; - for (var i = 0; i < xs.length; i++) { - if (f(xs[i], i, xs)) res.push(xs[i]); - } - return res; -} - -// String.prototype.substr - negative index don't work in IE8 -var substr = 'ab'.substr(-1) === 'b' - ? function (str, start, len) { return str.substr(start, len) } - : function (str, start, len) { - if (start < 0) start = str.length + start; - return str.substr(start, len); - } -; - -}).call(this,require('_process')) -},{"_process":24}],23:[function(require,module,exports){ -(function (process){ -'use strict'; - -function posix(path) { - return path.charAt(0) === '/'; -} - -function win32(path) { - // https://github.com/nodejs/node/blob/b3fcc245fb25539909ef1d5eaa01dbf92e168633/lib/path.js#L56 - var splitDeviceRe = /^([a-zA-Z]:|[\\\/]{2}[^\\\/]+[\\\/]+[^\\\/]+)?([\\\/])?([\s\S]*?)$/; - var result = splitDeviceRe.exec(path); - var device = result[1] || ''; - var isUnc = Boolean(device && device.charAt(1) !== ':'); - - // UNC paths are always absolute - return Boolean(result[2] || isUnc); -} - -module.exports = process.platform === 'win32' ? 
win32 : posix; -module.exports.posix = posix; -module.exports.win32 = win32; - -}).call(this,require('_process')) -},{"_process":24}],24:[function(require,module,exports){ -// shim for using process in browser -var process = module.exports = {}; - -// cached from whatever global is present so that test runners that stub it -// don't break things. But we need to wrap it in a try catch in case it is -// wrapped in strict mode code which doesn't define any globals. It's inside a -// function because try/catches deoptimize in certain engines. - -var cachedSetTimeout; -var cachedClearTimeout; - -function defaultSetTimout() { - throw new Error('setTimeout has not been defined'); -} -function defaultClearTimeout () { - throw new Error('clearTimeout has not been defined'); -} -(function () { - try { - if (typeof setTimeout === 'function') { - cachedSetTimeout = setTimeout; - } else { - cachedSetTimeout = defaultSetTimout; - } - } catch (e) { - cachedSetTimeout = defaultSetTimout; - } - try { - if (typeof clearTimeout === 'function') { - cachedClearTimeout = clearTimeout; - } else { - cachedClearTimeout = defaultClearTimeout; - } - } catch (e) { - cachedClearTimeout = defaultClearTimeout; - } -} ()) -function runTimeout(fun) { - if (cachedSetTimeout === setTimeout) { - //normal enviroments in sane situations - return setTimeout(fun, 0); - } - // if setTimeout wasn't available but was latter defined - if ((cachedSetTimeout === defaultSetTimout || !cachedSetTimeout) && setTimeout) { - cachedSetTimeout = setTimeout; - return setTimeout(fun, 0); - } - try { - // when when somebody has screwed with setTimeout but no I.E. maddness - return cachedSetTimeout(fun, 0); - } catch(e){ - try { - // When we are in I.E. but the script has been evaled so I.E. doesn't trust the global object when called normally - return cachedSetTimeout.call(null, fun, 0); - } catch(e){ - // same as above but when it's a version of I.E. 
that must have the global object for 'this', hopfully our context correct otherwise it will throw a global error - return cachedSetTimeout.call(this, fun, 0); - } - } - - -} -function runClearTimeout(marker) { - if (cachedClearTimeout === clearTimeout) { - //normal enviroments in sane situations - return clearTimeout(marker); - } - // if clearTimeout wasn't available but was latter defined - if ((cachedClearTimeout === defaultClearTimeout || !cachedClearTimeout) && clearTimeout) { - cachedClearTimeout = clearTimeout; - return clearTimeout(marker); - } - try { - // when when somebody has screwed with setTimeout but no I.E. maddness - return cachedClearTimeout(marker); - } catch (e){ - try { - // When we are in I.E. but the script has been evaled so I.E. doesn't trust the global object when called normally - return cachedClearTimeout.call(null, marker); - } catch (e){ - // same as above but when it's a version of I.E. that must have the global object for 'this', hopfully our context correct otherwise it will throw a global error. - // Some versions of I.E. 
have different rules for clearTimeout vs setTimeout - return cachedClearTimeout.call(this, marker); - } - } - - - -} -var queue = []; -var draining = false; -var currentQueue; -var queueIndex = -1; - -function cleanUpNextTick() { - if (!draining || !currentQueue) { - return; - } - draining = false; - if (currentQueue.length) { - queue = currentQueue.concat(queue); - } else { - queueIndex = -1; - } - if (queue.length) { - drainQueue(); - } -} - -function drainQueue() { - if (draining) { - return; - } - var timeout = runTimeout(cleanUpNextTick); - draining = true; - - var len = queue.length; - while(len) { - currentQueue = queue; - queue = []; - while (++queueIndex < len) { - if (currentQueue) { - currentQueue[queueIndex].run(); - } - } - queueIndex = -1; - len = queue.length; - } - currentQueue = null; - draining = false; - runClearTimeout(timeout); -} - -process.nextTick = function (fun) { - var args = new Array(arguments.length - 1); - if (arguments.length > 1) { - for (var i = 1; i < arguments.length; i++) { - args[i - 1] = arguments[i]; - } - } - queue.push(new Item(fun, args)); - if (queue.length === 1 && !draining) { - runTimeout(drainQueue); - } -}; - -// v8 likes predictible objects -function Item(fun, array) { - this.fun = fun; - this.array = array; -} -Item.prototype.run = function () { - this.fun.apply(null, this.array); -}; -process.title = 'browser'; -process.browser = true; -process.env = {}; -process.argv = []; -process.version = ''; // empty string to avoid regexp issues -process.versions = {}; - -function noop() {} - -process.on = noop; -process.addListener = noop; -process.once = noop; -process.off = noop; -process.removeListener = noop; -process.removeAllListeners = noop; -process.emit = noop; -process.prependListener = noop; -process.prependOnceListener = noop; - -process.listeners = function (name) { return [] } - -process.binding = function (name) { - throw new Error('process.binding is not supported'); -}; - -process.cwd = function () { return 
'/' }; -process.chdir = function (dir) { - throw new Error('process.chdir is not supported'); -}; -process.umask = function() { return 0; }; - -},{}],25:[function(require,module,exports){ -// Underscore.js 1.8.3 -// http://underscorejs.org -// (c) 2009-2015 Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors -// Underscore may be freely distributed under the MIT license. - -(function() { - - // Baseline setup - // -------------- - - // Establish the root object, `window` in the browser, or `exports` on the server. - var root = this; - - // Save the previous value of the `_` variable. - var previousUnderscore = root._; - - // Save bytes in the minified (but not gzipped) version: - var ArrayProto = Array.prototype, ObjProto = Object.prototype, FuncProto = Function.prototype; - - // Create quick reference variables for speed access to core prototypes. - var - push = ArrayProto.push, - slice = ArrayProto.slice, - toString = ObjProto.toString, - hasOwnProperty = ObjProto.hasOwnProperty; - - // All **ECMAScript 5** native function implementations that we hope to use - // are declared here. - var - nativeIsArray = Array.isArray, - nativeKeys = Object.keys, - nativeBind = FuncProto.bind, - nativeCreate = Object.create; - - // Naked function reference for surrogate-prototype-swapping. - var Ctor = function(){}; - - // Create a safe reference to the Underscore object for use below. - var _ = function(obj) { - if (obj instanceof _) return obj; - if (!(this instanceof _)) return new _(obj); - this._wrapped = obj; - }; - - // Export the Underscore object for **Node.js**, with - // backwards-compatibility for the old `require()` API. If we're in - // the browser, add `_` as a global object. - if (typeof exports !== 'undefined') { - if (typeof module !== 'undefined' && module.exports) { - exports = module.exports = _; - } - exports._ = _; - } else { - root._ = _; - } - - // Current version. 
- _.VERSION = '1.8.3'; - - // Internal function that returns an efficient (for current engines) version - // of the passed-in callback, to be repeatedly applied in other Underscore - // functions. - var optimizeCb = function(func, context, argCount) { - if (context === void 0) return func; - switch (argCount == null ? 3 : argCount) { - case 1: return function(value) { - return func.call(context, value); - }; - case 2: return function(value, other) { - return func.call(context, value, other); - }; - case 3: return function(value, index, collection) { - return func.call(context, value, index, collection); - }; - case 4: return function(accumulator, value, index, collection) { - return func.call(context, accumulator, value, index, collection); - }; - } - return function() { - return func.apply(context, arguments); - }; - }; - - // A mostly-internal function to generate callbacks that can be applied - // to each element in a collection, returning the desired result — either - // identity, an arbitrary callback, a property matcher, or a property accessor. - var cb = function(value, context, argCount) { - if (value == null) return _.identity; - if (_.isFunction(value)) return optimizeCb(value, context, argCount); - if (_.isObject(value)) return _.matcher(value); - return _.property(value); - }; - _.iteratee = function(value, context) { - return cb(value, context, Infinity); - }; - - // An internal function for creating assigner functions. - var createAssigner = function(keysFunc, undefinedOnly) { - return function(obj) { - var length = arguments.length; - if (length < 2 || obj == null) return obj; - for (var index = 1; index < length; index++) { - var source = arguments[index], - keys = keysFunc(source), - l = keys.length; - for (var i = 0; i < l; i++) { - var key = keys[i]; - if (!undefinedOnly || obj[key] === void 0) obj[key] = source[key]; - } - } - return obj; - }; - }; - - // An internal function for creating a new object that inherits from another. 
- var baseCreate = function(prototype) { - if (!_.isObject(prototype)) return {}; - if (nativeCreate) return nativeCreate(prototype); - Ctor.prototype = prototype; - var result = new Ctor; - Ctor.prototype = null; - return result; - }; - - var property = function(key) { - return function(obj) { - return obj == null ? void 0 : obj[key]; - }; - }; - - // Helper for collection methods to determine whether a collection - // should be iterated as an array or as an object - // Related: http://people.mozilla.org/~jorendorff/es6-draft.html#sec-tolength - // Avoids a very nasty iOS 8 JIT bug on ARM-64. #2094 - var MAX_ARRAY_INDEX = Math.pow(2, 53) - 1; - var getLength = property('length'); - var isArrayLike = function(collection) { - var length = getLength(collection); - return typeof length == 'number' && length >= 0 && length <= MAX_ARRAY_INDEX; - }; - - // Collection Functions - // -------------------- - - // The cornerstone, an `each` implementation, aka `forEach`. - // Handles raw objects in addition to array-likes. Treats all - // sparse array-likes as if they were dense. - _.each = _.forEach = function(obj, iteratee, context) { - iteratee = optimizeCb(iteratee, context); - var i, length; - if (isArrayLike(obj)) { - for (i = 0, length = obj.length; i < length; i++) { - iteratee(obj[i], i, obj); - } - } else { - var keys = _.keys(obj); - for (i = 0, length = keys.length; i < length; i++) { - iteratee(obj[keys[i]], keys[i], obj); - } - } - return obj; - }; - - // Return the results of applying the iteratee to each element. - _.map = _.collect = function(obj, iteratee, context) { - iteratee = cb(iteratee, context); - var keys = !isArrayLike(obj) && _.keys(obj), - length = (keys || obj).length, - results = Array(length); - for (var index = 0; index < length; index++) { - var currentKey = keys ? keys[index] : index; - results[index] = iteratee(obj[currentKey], currentKey, obj); - } - return results; - }; - - // Create a reducing function iterating left or right. 
- function createReduce(dir) { - // Optimized iterator function as using arguments.length - // in the main function will deoptimize the, see #1991. - function iterator(obj, iteratee, memo, keys, index, length) { - for (; index >= 0 && index < length; index += dir) { - var currentKey = keys ? keys[index] : index; - memo = iteratee(memo, obj[currentKey], currentKey, obj); - } - return memo; - } - - return function(obj, iteratee, memo, context) { - iteratee = optimizeCb(iteratee, context, 4); - var keys = !isArrayLike(obj) && _.keys(obj), - length = (keys || obj).length, - index = dir > 0 ? 0 : length - 1; - // Determine the initial value if none is provided. - if (arguments.length < 3) { - memo = obj[keys ? keys[index] : index]; - index += dir; - } - return iterator(obj, iteratee, memo, keys, index, length); - }; - } - - // **Reduce** builds up a single result from a list of values, aka `inject`, - // or `foldl`. - _.reduce = _.foldl = _.inject = createReduce(1); - - // The right-associative version of reduce, also known as `foldr`. - _.reduceRight = _.foldr = createReduce(-1); - - // Return the first value which passes a truth test. Aliased as `detect`. - _.find = _.detect = function(obj, predicate, context) { - var key; - if (isArrayLike(obj)) { - key = _.findIndex(obj, predicate, context); - } else { - key = _.findKey(obj, predicate, context); - } - if (key !== void 0 && key !== -1) return obj[key]; - }; - - // Return all the elements that pass a truth test. - // Aliased as `select`. - _.filter = _.select = function(obj, predicate, context) { - var results = []; - predicate = cb(predicate, context); - _.each(obj, function(value, index, list) { - if (predicate(value, index, list)) results.push(value); - }); - return results; - }; - - // Return all the elements for which a truth test fails. - _.reject = function(obj, predicate, context) { - return _.filter(obj, _.negate(cb(predicate)), context); - }; - - // Determine whether all of the elements match a truth test. 
- // Aliased as `all`. - _.every = _.all = function(obj, predicate, context) { - predicate = cb(predicate, context); - var keys = !isArrayLike(obj) && _.keys(obj), - length = (keys || obj).length; - for (var index = 0; index < length; index++) { - var currentKey = keys ? keys[index] : index; - if (!predicate(obj[currentKey], currentKey, obj)) return false; - } - return true; - }; - - // Determine if at least one element in the object matches a truth test. - // Aliased as `any`. - _.some = _.any = function(obj, predicate, context) { - predicate = cb(predicate, context); - var keys = !isArrayLike(obj) && _.keys(obj), - length = (keys || obj).length; - for (var index = 0; index < length; index++) { - var currentKey = keys ? keys[index] : index; - if (predicate(obj[currentKey], currentKey, obj)) return true; - } - return false; - }; - - // Determine if the array or object contains a given item (using `===`). - // Aliased as `includes` and `include`. - _.contains = _.includes = _.include = function(obj, item, fromIndex, guard) { - if (!isArrayLike(obj)) obj = _.values(obj); - if (typeof fromIndex != 'number' || guard) fromIndex = 0; - return _.indexOf(obj, item, fromIndex) >= 0; - }; - - // Invoke a method (with arguments) on every item in a collection. - _.invoke = function(obj, method) { - var args = slice.call(arguments, 2); - var isFunc = _.isFunction(method); - return _.map(obj, function(value) { - var func = isFunc ? method : value[method]; - return func == null ? func : func.apply(value, args); - }); - }; - - // Convenience version of a common use case of `map`: fetching a property. - _.pluck = function(obj, key) { - return _.map(obj, _.property(key)); - }; - - // Convenience version of a common use case of `filter`: selecting only objects - // containing specific `key:value` pairs. 
- _.where = function(obj, attrs) { - return _.filter(obj, _.matcher(attrs)); - }; - - // Convenience version of a common use case of `find`: getting the first object - // containing specific `key:value` pairs. - _.findWhere = function(obj, attrs) { - return _.find(obj, _.matcher(attrs)); - }; - - // Return the maximum element (or element-based computation). - _.max = function(obj, iteratee, context) { - var result = -Infinity, lastComputed = -Infinity, - value, computed; - if (iteratee == null && obj != null) { - obj = isArrayLike(obj) ? obj : _.values(obj); - for (var i = 0, length = obj.length; i < length; i++) { - value = obj[i]; - if (value > result) { - result = value; - } - } - } else { - iteratee = cb(iteratee, context); - _.each(obj, function(value, index, list) { - computed = iteratee(value, index, list); - if (computed > lastComputed || computed === -Infinity && result === -Infinity) { - result = value; - lastComputed = computed; - } - }); - } - return result; - }; - - // Return the minimum element (or element-based computation). - _.min = function(obj, iteratee, context) { - var result = Infinity, lastComputed = Infinity, - value, computed; - if (iteratee == null && obj != null) { - obj = isArrayLike(obj) ? obj : _.values(obj); - for (var i = 0, length = obj.length; i < length; i++) { - value = obj[i]; - if (value < result) { - result = value; - } - } - } else { - iteratee = cb(iteratee, context); - _.each(obj, function(value, index, list) { - computed = iteratee(value, index, list); - if (computed < lastComputed || computed === Infinity && result === Infinity) { - result = value; - lastComputed = computed; - } - }); - } - return result; - }; - - // Shuffle a collection, using the modern version of the - // [Fisher-Yates shuffle](http://en.wikipedia.org/wiki/Fisher–Yates_shuffle). - _.shuffle = function(obj) { - var set = isArrayLike(obj) ? 
obj : _.values(obj); - var length = set.length; - var shuffled = Array(length); - for (var index = 0, rand; index < length; index++) { - rand = _.random(0, index); - if (rand !== index) shuffled[index] = shuffled[rand]; - shuffled[rand] = set[index]; - } - return shuffled; - }; - - // Sample **n** random values from a collection. - // If **n** is not specified, returns a single random element. - // The internal `guard` argument allows it to work with `map`. - _.sample = function(obj, n, guard) { - if (n == null || guard) { - if (!isArrayLike(obj)) obj = _.values(obj); - return obj[_.random(obj.length - 1)]; - } - return _.shuffle(obj).slice(0, Math.max(0, n)); - }; - - // Sort the object's values by a criterion produced by an iteratee. - _.sortBy = function(obj, iteratee, context) { - iteratee = cb(iteratee, context); - return _.pluck(_.map(obj, function(value, index, list) { - return { - value: value, - index: index, - criteria: iteratee(value, index, list) - }; - }).sort(function(left, right) { - var a = left.criteria; - var b = right.criteria; - if (a !== b) { - if (a > b || a === void 0) return 1; - if (a < b || b === void 0) return -1; - } - return left.index - right.index; - }), 'value'); - }; - - // An internal function used for aggregate "group by" operations. - var group = function(behavior) { - return function(obj, iteratee, context) { - var result = {}; - iteratee = cb(iteratee, context); - _.each(obj, function(value, index) { - var key = iteratee(value, index, obj); - behavior(result, value, key); - }); - return result; - }; - }; - - // Groups the object's values by a criterion. Pass either a string attribute - // to group by, or a function that returns the criterion. - _.groupBy = group(function(result, value, key) { - if (_.has(result, key)) result[key].push(value); else result[key] = [value]; - }); - - // Indexes the object's values by a criterion, similar to `groupBy`, but for - // when you know that your index values will be unique. 
- _.indexBy = group(function(result, value, key) { - result[key] = value; - }); - - // Counts instances of an object that group by a certain criterion. Pass - // either a string attribute to count by, or a function that returns the - // criterion. - _.countBy = group(function(result, value, key) { - if (_.has(result, key)) result[key]++; else result[key] = 1; - }); - - // Safely create a real, live array from anything iterable. - _.toArray = function(obj) { - if (!obj) return []; - if (_.isArray(obj)) return slice.call(obj); - if (isArrayLike(obj)) return _.map(obj, _.identity); - return _.values(obj); - }; - - // Return the number of elements in an object. - _.size = function(obj) { - if (obj == null) return 0; - return isArrayLike(obj) ? obj.length : _.keys(obj).length; - }; - - // Split a collection into two arrays: one whose elements all satisfy the given - // predicate, and one whose elements all do not satisfy the predicate. - _.partition = function(obj, predicate, context) { - predicate = cb(predicate, context); - var pass = [], fail = []; - _.each(obj, function(value, key, obj) { - (predicate(value, key, obj) ? pass : fail).push(value); - }); - return [pass, fail]; - }; - - // Array Functions - // --------------- - - // Get the first element of an array. Passing **n** will return the first N - // values in the array. Aliased as `head` and `take`. The **guard** check - // allows it to work with `_.map`. - _.first = _.head = _.take = function(array, n, guard) { - if (array == null) return void 0; - if (n == null || guard) return array[0]; - return _.initial(array, array.length - n); - }; - - // Returns everything but the last entry of the array. Especially useful on - // the arguments object. Passing **n** will return all the values in - // the array, excluding the last N. - _.initial = function(array, n, guard) { - return slice.call(array, 0, Math.max(0, array.length - (n == null || guard ? 1 : n))); - }; - - // Get the last element of an array. 
Passing **n** will return the last N - // values in the array. - _.last = function(array, n, guard) { - if (array == null) return void 0; - if (n == null || guard) return array[array.length - 1]; - return _.rest(array, Math.max(0, array.length - n)); - }; - - // Returns everything but the first entry of the array. Aliased as `tail` and `drop`. - // Especially useful on the arguments object. Passing an **n** will return - // the rest N values in the array. - _.rest = _.tail = _.drop = function(array, n, guard) { - return slice.call(array, n == null || guard ? 1 : n); - }; - - // Trim out all falsy values from an array. - _.compact = function(array) { - return _.filter(array, _.identity); - }; - - // Internal implementation of a recursive `flatten` function. - var flatten = function(input, shallow, strict, startIndex) { - var output = [], idx = 0; - for (var i = startIndex || 0, length = getLength(input); i < length; i++) { - var value = input[i]; - if (isArrayLike(value) && (_.isArray(value) || _.isArguments(value))) { - //flatten current level of array or arguments object - if (!shallow) value = flatten(value, shallow, strict); - var j = 0, len = value.length; - output.length += len; - while (j < len) { - output[idx++] = value[j++]; - } - } else if (!strict) { - output[idx++] = value; - } - } - return output; - }; - - // Flatten out an array, either recursively (by default), or just one level. - _.flatten = function(array, shallow) { - return flatten(array, shallow, false); - }; - - // Return a version of the array that does not contain the specified value(s). - _.without = function(array) { - return _.difference(array, slice.call(arguments, 1)); - }; - - // Produce a duplicate-free version of the array. If the array has already - // been sorted, you have the option of using a faster algorithm. - // Aliased as `unique`. 
- _.uniq = _.unique = function(array, isSorted, iteratee, context) { - if (!_.isBoolean(isSorted)) { - context = iteratee; - iteratee = isSorted; - isSorted = false; - } - if (iteratee != null) iteratee = cb(iteratee, context); - var result = []; - var seen = []; - for (var i = 0, length = getLength(array); i < length; i++) { - var value = array[i], - computed = iteratee ? iteratee(value, i, array) : value; - if (isSorted) { - if (!i || seen !== computed) result.push(value); - seen = computed; - } else if (iteratee) { - if (!_.contains(seen, computed)) { - seen.push(computed); - result.push(value); - } - } else if (!_.contains(result, value)) { - result.push(value); - } - } - return result; - }; - - // Produce an array that contains the union: each distinct element from all of - // the passed-in arrays. - _.union = function() { - return _.uniq(flatten(arguments, true, true)); - }; - - // Produce an array that contains every item shared between all the - // passed-in arrays. - _.intersection = function(array) { - var result = []; - var argsLength = arguments.length; - for (var i = 0, length = getLength(array); i < length; i++) { - var item = array[i]; - if (_.contains(result, item)) continue; - for (var j = 1; j < argsLength; j++) { - if (!_.contains(arguments[j], item)) break; - } - if (j === argsLength) result.push(item); - } - return result; - }; - - // Take the difference between one array and a number of other arrays. - // Only the elements present in just the first array will remain. - _.difference = function(array) { - var rest = flatten(arguments, true, true, 1); - return _.filter(array, function(value){ - return !_.contains(rest, value); - }); - }; - - // Zip together multiple lists into a single array -- elements that share - // an index go together. - _.zip = function() { - return _.unzip(arguments); - }; - - // Complement of _.zip. 
Unzip accepts an array of arrays and groups - // each array's elements on shared indices - _.unzip = function(array) { - var length = array && _.max(array, getLength).length || 0; - var result = Array(length); - - for (var index = 0; index < length; index++) { - result[index] = _.pluck(array, index); - } - return result; - }; - - // Converts lists into objects. Pass either a single array of `[key, value]` - // pairs, or two parallel arrays of the same length -- one of keys, and one of - // the corresponding values. - _.object = function(list, values) { - var result = {}; - for (var i = 0, length = getLength(list); i < length; i++) { - if (values) { - result[list[i]] = values[i]; - } else { - result[list[i][0]] = list[i][1]; - } - } - return result; - }; - - // Generator function to create the findIndex and findLastIndex functions - function createPredicateIndexFinder(dir) { - return function(array, predicate, context) { - predicate = cb(predicate, context); - var length = getLength(array); - var index = dir > 0 ? 0 : length - 1; - for (; index >= 0 && index < length; index += dir) { - if (predicate(array[index], index, array)) return index; - } - return -1; - }; - } - - // Returns the first index on an array-like that passes a predicate test - _.findIndex = createPredicateIndexFinder(1); - _.findLastIndex = createPredicateIndexFinder(-1); - - // Use a comparator function to figure out the smallest index at which - // an object should be inserted so as to maintain order. Uses binary search. 
- _.sortedIndex = function(array, obj, iteratee, context) { - iteratee = cb(iteratee, context, 1); - var value = iteratee(obj); - var low = 0, high = getLength(array); - while (low < high) { - var mid = Math.floor((low + high) / 2); - if (iteratee(array[mid]) < value) low = mid + 1; else high = mid; - } - return low; - }; - - // Generator function to create the indexOf and lastIndexOf functions - function createIndexFinder(dir, predicateFind, sortedIndex) { - return function(array, item, idx) { - var i = 0, length = getLength(array); - if (typeof idx == 'number') { - if (dir > 0) { - i = idx >= 0 ? idx : Math.max(idx + length, i); - } else { - length = idx >= 0 ? Math.min(idx + 1, length) : idx + length + 1; - } - } else if (sortedIndex && idx && length) { - idx = sortedIndex(array, item); - return array[idx] === item ? idx : -1; - } - if (item !== item) { - idx = predicateFind(slice.call(array, i, length), _.isNaN); - return idx >= 0 ? idx + i : -1; - } - for (idx = dir > 0 ? i : length - 1; idx >= 0 && idx < length; idx += dir) { - if (array[idx] === item) return idx; - } - return -1; - }; - } - - // Return the position of the first occurrence of an item in an array, - // or -1 if the item is not included in the array. - // If the array is large and already in sort order, pass `true` - // for **isSorted** to use binary search. - _.indexOf = createIndexFinder(1, _.findIndex, _.sortedIndex); - _.lastIndexOf = createIndexFinder(-1, _.findLastIndex); - - // Generate an integer Array containing an arithmetic progression. A port of - // the native Python `range()` function. See - // [the Python documentation](http://docs.python.org/library/functions.html#range). 
- _.range = function(start, stop, step) { - if (stop == null) { - stop = start || 0; - start = 0; - } - step = step || 1; - - var length = Math.max(Math.ceil((stop - start) / step), 0); - var range = Array(length); - - for (var idx = 0; idx < length; idx++, start += step) { - range[idx] = start; - } - - return range; - }; - - // Function (ahem) Functions - // ------------------ - - // Determines whether to execute a function as a constructor - // or a normal function with the provided arguments - var executeBound = function(sourceFunc, boundFunc, context, callingContext, args) { - if (!(callingContext instanceof boundFunc)) return sourceFunc.apply(context, args); - var self = baseCreate(sourceFunc.prototype); - var result = sourceFunc.apply(self, args); - if (_.isObject(result)) return result; - return self; - }; - - // Create a function bound to a given object (assigning `this`, and arguments, - // optionally). Delegates to **ECMAScript 5**'s native `Function.bind` if - // available. - _.bind = function(func, context) { - if (nativeBind && func.bind === nativeBind) return nativeBind.apply(func, slice.call(arguments, 1)); - if (!_.isFunction(func)) throw new TypeError('Bind must be called on a function'); - var args = slice.call(arguments, 2); - var bound = function() { - return executeBound(func, bound, context, this, args.concat(slice.call(arguments))); - }; - return bound; - }; - - // Partially apply a function by creating a version that has had some of its - // arguments pre-filled, without changing its dynamic `this` context. _ acts - // as a placeholder, allowing any combination of arguments to be pre-filled. - _.partial = function(func) { - var boundArgs = slice.call(arguments, 1); - var bound = function() { - var position = 0, length = boundArgs.length; - var args = Array(length); - for (var i = 0; i < length; i++) { - args[i] = boundArgs[i] === _ ? 
arguments[position++] : boundArgs[i]; - } - while (position < arguments.length) args.push(arguments[position++]); - return executeBound(func, bound, this, this, args); - }; - return bound; - }; - - // Bind a number of an object's methods to that object. Remaining arguments - // are the method names to be bound. Useful for ensuring that all callbacks - // defined on an object belong to it. - _.bindAll = function(obj) { - var i, length = arguments.length, key; - if (length <= 1) throw new Error('bindAll must be passed function names'); - for (i = 1; i < length; i++) { - key = arguments[i]; - obj[key] = _.bind(obj[key], obj); - } - return obj; - }; - - // Memoize an expensive function by storing its results. - _.memoize = function(func, hasher) { - var memoize = function(key) { - var cache = memoize.cache; - var address = '' + (hasher ? hasher.apply(this, arguments) : key); - if (!_.has(cache, address)) cache[address] = func.apply(this, arguments); - return cache[address]; - }; - memoize.cache = {}; - return memoize; - }; - - // Delays a function for the given number of milliseconds, and then calls - // it with the arguments supplied. - _.delay = function(func, wait) { - var args = slice.call(arguments, 2); - return setTimeout(function(){ - return func.apply(null, args); - }, wait); - }; - - // Defers a function, scheduling it to run after the current call stack has - // cleared. - _.defer = _.partial(_.delay, _, 1); - - // Returns a function, that, when invoked, will only be triggered at most once - // during a given window of time. Normally, the throttled function will run - // as much as it can, without ever going more than once per `wait` duration; - // but if you'd like to disable the execution on the leading edge, pass - // `{leading: false}`. To disable execution on the trailing edge, ditto. 
- _.throttle = function(func, wait, options) { - var context, args, result; - var timeout = null; - var previous = 0; - if (!options) options = {}; - var later = function() { - previous = options.leading === false ? 0 : _.now(); - timeout = null; - result = func.apply(context, args); - if (!timeout) context = args = null; - }; - return function() { - var now = _.now(); - if (!previous && options.leading === false) previous = now; - var remaining = wait - (now - previous); - context = this; - args = arguments; - if (remaining <= 0 || remaining > wait) { - if (timeout) { - clearTimeout(timeout); - timeout = null; - } - previous = now; - result = func.apply(context, args); - if (!timeout) context = args = null; - } else if (!timeout && options.trailing !== false) { - timeout = setTimeout(later, remaining); - } - return result; - }; - }; - - // Returns a function, that, as long as it continues to be invoked, will not - // be triggered. The function will be called after it stops being called for - // N milliseconds. If `immediate` is passed, trigger the function on the - // leading edge, instead of the trailing. - _.debounce = function(func, wait, immediate) { - var timeout, args, context, timestamp, result; - - var later = function() { - var last = _.now() - timestamp; - - if (last < wait && last >= 0) { - timeout = setTimeout(later, wait - last); - } else { - timeout = null; - if (!immediate) { - result = func.apply(context, args); - if (!timeout) context = args = null; - } - } - }; - - return function() { - context = this; - args = arguments; - timestamp = _.now(); - var callNow = immediate && !timeout; - if (!timeout) timeout = setTimeout(later, wait); - if (callNow) { - result = func.apply(context, args); - context = args = null; - } - - return result; - }; - }; - - // Returns the first function passed as an argument to the second, - // allowing you to adjust arguments, run code before and after, and - // conditionally execute the original function. 
- _.wrap = function(func, wrapper) { - return _.partial(wrapper, func); - }; - - // Returns a negated version of the passed-in predicate. - _.negate = function(predicate) { - return function() { - return !predicate.apply(this, arguments); - }; - }; - - // Returns a function that is the composition of a list of functions, each - // consuming the return value of the function that follows. - _.compose = function() { - var args = arguments; - var start = args.length - 1; - return function() { - var i = start; - var result = args[start].apply(this, arguments); - while (i--) result = args[i].call(this, result); - return result; - }; - }; - - // Returns a function that will only be executed on and after the Nth call. - _.after = function(times, func) { - return function() { - if (--times < 1) { - return func.apply(this, arguments); - } - }; - }; - - // Returns a function that will only be executed up to (but not including) the Nth call. - _.before = function(times, func) { - var memo; - return function() { - if (--times > 0) { - memo = func.apply(this, arguments); - } - if (times <= 1) func = null; - return memo; - }; - }; - - // Returns a function that will be executed at most one time, no matter how - // often you call it. Useful for lazy initialization. - _.once = _.partial(_.before, 2); - - // Object Functions - // ---------------- - - // Keys in IE < 9 that won't be iterated by `for key in ...` and thus missed. - var hasEnumBug = !{toString: null}.propertyIsEnumerable('toString'); - var nonEnumerableProps = ['valueOf', 'isPrototypeOf', 'toString', - 'propertyIsEnumerable', 'hasOwnProperty', 'toLocaleString']; - - function collectNonEnumProps(obj, keys) { - var nonEnumIdx = nonEnumerableProps.length; - var constructor = obj.constructor; - var proto = (_.isFunction(constructor) && constructor.prototype) || ObjProto; - - // Constructor is a special case. 
- var prop = 'constructor'; - if (_.has(obj, prop) && !_.contains(keys, prop)) keys.push(prop); - - while (nonEnumIdx--) { - prop = nonEnumerableProps[nonEnumIdx]; - if (prop in obj && obj[prop] !== proto[prop] && !_.contains(keys, prop)) { - keys.push(prop); - } - } - } - - // Retrieve the names of an object's own properties. - // Delegates to **ECMAScript 5**'s native `Object.keys` - _.keys = function(obj) { - if (!_.isObject(obj)) return []; - if (nativeKeys) return nativeKeys(obj); - var keys = []; - for (var key in obj) if (_.has(obj, key)) keys.push(key); - // Ahem, IE < 9. - if (hasEnumBug) collectNonEnumProps(obj, keys); - return keys; - }; - - // Retrieve all the property names of an object. - _.allKeys = function(obj) { - if (!_.isObject(obj)) return []; - var keys = []; - for (var key in obj) keys.push(key); - // Ahem, IE < 9. - if (hasEnumBug) collectNonEnumProps(obj, keys); - return keys; - }; - - // Retrieve the values of an object's properties. - _.values = function(obj) { - var keys = _.keys(obj); - var length = keys.length; - var values = Array(length); - for (var i = 0; i < length; i++) { - values[i] = obj[keys[i]]; - } - return values; - }; - - // Returns the results of applying the iteratee to each element of the object - // In contrast to _.map it returns an object - _.mapObject = function(obj, iteratee, context) { - iteratee = cb(iteratee, context); - var keys = _.keys(obj), - length = keys.length, - results = {}, - currentKey; - for (var index = 0; index < length; index++) { - currentKey = keys[index]; - results[currentKey] = iteratee(obj[currentKey], currentKey, obj); - } - return results; - }; - - // Convert an object into a list of `[key, value]` pairs. - _.pairs = function(obj) { - var keys = _.keys(obj); - var length = keys.length; - var pairs = Array(length); - for (var i = 0; i < length; i++) { - pairs[i] = [keys[i], obj[keys[i]]]; - } - return pairs; - }; - - // Invert the keys and values of an object. The values must be serializable. 
- _.invert = function(obj) { - var result = {}; - var keys = _.keys(obj); - for (var i = 0, length = keys.length; i < length; i++) { - result[obj[keys[i]]] = keys[i]; - } - return result; - }; - - // Return a sorted list of the function names available on the object. - // Aliased as `methods` - _.functions = _.methods = function(obj) { - var names = []; - for (var key in obj) { - if (_.isFunction(obj[key])) names.push(key); - } - return names.sort(); - }; - - // Extend a given object with all the properties in passed-in object(s). - _.extend = createAssigner(_.allKeys); - - // Assigns a given object with all the own properties in the passed-in object(s) - // (https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object/assign) - _.extendOwn = _.assign = createAssigner(_.keys); - - // Returns the first key on an object that passes a predicate test - _.findKey = function(obj, predicate, context) { - predicate = cb(predicate, context); - var keys = _.keys(obj), key; - for (var i = 0, length = keys.length; i < length; i++) { - key = keys[i]; - if (predicate(obj[key], key, obj)) return key; - } - }; - - // Return a copy of the object only containing the whitelisted properties. - _.pick = function(object, oiteratee, context) { - var result = {}, obj = object, iteratee, keys; - if (obj == null) return result; - if (_.isFunction(oiteratee)) { - keys = _.allKeys(obj); - iteratee = optimizeCb(oiteratee, context); - } else { - keys = flatten(arguments, false, false, 1); - iteratee = function(value, key, obj) { return key in obj; }; - obj = Object(obj); - } - for (var i = 0, length = keys.length; i < length; i++) { - var key = keys[i]; - var value = obj[key]; - if (iteratee(value, key, obj)) result[key] = value; - } - return result; - }; - - // Return a copy of the object without the blacklisted properties. 
- _.omit = function(obj, iteratee, context) { - if (_.isFunction(iteratee)) { - iteratee = _.negate(iteratee); - } else { - var keys = _.map(flatten(arguments, false, false, 1), String); - iteratee = function(value, key) { - return !_.contains(keys, key); - }; - } - return _.pick(obj, iteratee, context); - }; - - // Fill in a given object with default properties. - _.defaults = createAssigner(_.allKeys, true); - - // Creates an object that inherits from the given prototype object. - // If additional properties are provided then they will be added to the - // created object. - _.create = function(prototype, props) { - var result = baseCreate(prototype); - if (props) _.extendOwn(result, props); - return result; - }; - - // Create a (shallow-cloned) duplicate of an object. - _.clone = function(obj) { - if (!_.isObject(obj)) return obj; - return _.isArray(obj) ? obj.slice() : _.extend({}, obj); - }; - - // Invokes interceptor with the obj, and then returns obj. - // The primary purpose of this method is to "tap into" a method chain, in - // order to perform operations on intermediate results within the chain. - _.tap = function(obj, interceptor) { - interceptor(obj); - return obj; - }; - - // Returns whether an object has a given set of `key:value` pairs. - _.isMatch = function(object, attrs) { - var keys = _.keys(attrs), length = keys.length; - if (object == null) return !length; - var obj = Object(object); - for (var i = 0; i < length; i++) { - var key = keys[i]; - if (attrs[key] !== obj[key] || !(key in obj)) return false; - } - return true; - }; - - - // Internal recursive comparison function for `isEqual`. - var eq = function(a, b, aStack, bStack) { - // Identical objects are equal. `0 === -0`, but they aren't identical. - // See the [Harmony `egal` proposal](http://wiki.ecmascript.org/doku.php?id=harmony:egal). - if (a === b) return a !== 0 || 1 / a === 1 / b; - // A strict comparison is necessary because `null == undefined`. 
- if (a == null || b == null) return a === b; - // Unwrap any wrapped objects. - if (a instanceof _) a = a._wrapped; - if (b instanceof _) b = b._wrapped; - // Compare `[[Class]]` names. - var className = toString.call(a); - if (className !== toString.call(b)) return false; - switch (className) { - // Strings, numbers, regular expressions, dates, and booleans are compared by value. - case '[object RegExp]': - // RegExps are coerced to strings for comparison (Note: '' + /a/i === '/a/i') - case '[object String]': - // Primitives and their corresponding object wrappers are equivalent; thus, `"5"` is - // equivalent to `new String("5")`. - return '' + a === '' + b; - case '[object Number]': - // `NaN`s are equivalent, but non-reflexive. - // Object(NaN) is equivalent to NaN - if (+a !== +a) return +b !== +b; - // An `egal` comparison is performed for other numeric values. - return +a === 0 ? 1 / +a === 1 / b : +a === +b; - case '[object Date]': - case '[object Boolean]': - // Coerce dates and booleans to numeric primitive values. Dates are compared by their - // millisecond representations. Note that invalid dates with millisecond representations - // of `NaN` are not equivalent. - return +a === +b; - } - - var areArrays = className === '[object Array]'; - if (!areArrays) { - if (typeof a != 'object' || typeof b != 'object') return false; - - // Objects with different constructors are not equivalent, but `Object`s or `Array`s - // from different frames are. - var aCtor = a.constructor, bCtor = b.constructor; - if (aCtor !== bCtor && !(_.isFunction(aCtor) && aCtor instanceof aCtor && - _.isFunction(bCtor) && bCtor instanceof bCtor) - && ('constructor' in a && 'constructor' in b)) { - return false; - } - } - // Assume equality for cyclic structures. The algorithm for detecting cyclic - // structures is adapted from ES 5.1 section 15.12.3, abstract operation `JO`. - - // Initializing stack of traversed objects. 
- // It's done here since we only need them for objects and arrays comparison. - aStack = aStack || []; - bStack = bStack || []; - var length = aStack.length; - while (length--) { - // Linear search. Performance is inversely proportional to the number of - // unique nested structures. - if (aStack[length] === a) return bStack[length] === b; - } - - // Add the first object to the stack of traversed objects. - aStack.push(a); - bStack.push(b); - - // Recursively compare objects and arrays. - if (areArrays) { - // Compare array lengths to determine if a deep comparison is necessary. - length = a.length; - if (length !== b.length) return false; - // Deep compare the contents, ignoring non-numeric properties. - while (length--) { - if (!eq(a[length], b[length], aStack, bStack)) return false; - } - } else { - // Deep compare objects. - var keys = _.keys(a), key; - length = keys.length; - // Ensure that both objects contain the same number of properties before comparing deep equality. - if (_.keys(b).length !== length) return false; - while (length--) { - // Deep compare each member - key = keys[length]; - if (!(_.has(b, key) && eq(a[key], b[key], aStack, bStack))) return false; - } - } - // Remove the first object from the stack of traversed objects. - aStack.pop(); - bStack.pop(); - return true; - }; - - // Perform a deep comparison to check if two objects are equal. - _.isEqual = function(a, b) { - return eq(a, b); - }; - - // Is a given array, string, or object empty? - // An "empty" object has no enumerable own-properties. - _.isEmpty = function(obj) { - if (obj == null) return true; - if (isArrayLike(obj) && (_.isArray(obj) || _.isString(obj) || _.isArguments(obj))) return obj.length === 0; - return _.keys(obj).length === 0; - }; - - // Is a given value a DOM element? - _.isElement = function(obj) { - return !!(obj && obj.nodeType === 1); - }; - - // Is a given value an array? 
- // Delegates to ECMA5's native Array.isArray - _.isArray = nativeIsArray || function(obj) { - return toString.call(obj) === '[object Array]'; - }; - - // Is a given variable an object? - _.isObject = function(obj) { - var type = typeof obj; - return type === 'function' || type === 'object' && !!obj; - }; - - // Add some isType methods: isArguments, isFunction, isString, isNumber, isDate, isRegExp, isError. - _.each(['Arguments', 'Function', 'String', 'Number', 'Date', 'RegExp', 'Error'], function(name) { - _['is' + name] = function(obj) { - return toString.call(obj) === '[object ' + name + ']'; - }; - }); - - // Define a fallback version of the method in browsers (ahem, IE < 9), where - // there isn't any inspectable "Arguments" type. - if (!_.isArguments(arguments)) { - _.isArguments = function(obj) { - return _.has(obj, 'callee'); - }; - } - - // Optimize `isFunction` if appropriate. Work around some typeof bugs in old v8, - // IE 11 (#1621), and in Safari 8 (#1929). - if (typeof /./ != 'function' && typeof Int8Array != 'object') { - _.isFunction = function(obj) { - return typeof obj == 'function' || false; - }; - } - - // Is a given object a finite number? - _.isFinite = function(obj) { - return isFinite(obj) && !isNaN(parseFloat(obj)); - }; - - // Is the given value `NaN`? (NaN is the only number which does not equal itself). - _.isNaN = function(obj) { - return _.isNumber(obj) && obj !== +obj; - }; - - // Is a given value a boolean? - _.isBoolean = function(obj) { - return obj === true || obj === false || toString.call(obj) === '[object Boolean]'; - }; - - // Is a given value equal to null? - _.isNull = function(obj) { - return obj === null; - }; - - // Is a given variable undefined? - _.isUndefined = function(obj) { - return obj === void 0; - }; - - // Shortcut function for checking if an object has a given property directly - // on itself (in other words, not on a prototype). 
- _.has = function(obj, key) { - return obj != null && hasOwnProperty.call(obj, key); - }; - - // Utility Functions - // ----------------- - - // Run Underscore.js in *noConflict* mode, returning the `_` variable to its - // previous owner. Returns a reference to the Underscore object. - _.noConflict = function() { - root._ = previousUnderscore; - return this; - }; - - // Keep the identity function around for default iteratees. - _.identity = function(value) { - return value; - }; - - // Predicate-generating functions. Often useful outside of Underscore. - _.constant = function(value) { - return function() { - return value; - }; - }; - - _.noop = function(){}; - - _.property = property; - - // Generates a function for a given object that returns a given property. - _.propertyOf = function(obj) { - return obj == null ? function(){} : function(key) { - return obj[key]; - }; - }; - - // Returns a predicate for checking whether an object has a given set of - // `key:value` pairs. - _.matcher = _.matches = function(attrs) { - attrs = _.extendOwn({}, attrs); - return function(obj) { - return _.isMatch(obj, attrs); - }; - }; - - // Run a function **n** times. - _.times = function(n, iteratee, context) { - var accum = Array(Math.max(0, n)); - iteratee = optimizeCb(iteratee, context, 1); - for (var i = 0; i < n; i++) accum[i] = iteratee(i); - return accum; - }; - - // Return a random integer between min and max (inclusive). - _.random = function(min, max) { - if (max == null) { - max = min; - min = 0; - } - return min + Math.floor(Math.random() * (max - min + 1)); - }; - - // A (possibly faster) way to get the current timestamp as an integer. - _.now = Date.now || function() { - return new Date().getTime(); - }; - - // List of HTML entities for escaping. - var escapeMap = { - '&': '&', - '<': '<', - '>': '>', - '"': '"', - "'": ''', - '`': '`' - }; - var unescapeMap = _.invert(escapeMap); - - // Functions for escaping and unescaping strings to/from HTML interpolation. 
- var createEscaper = function(map) { - var escaper = function(match) { - return map[match]; - }; - // Regexes for identifying a key that needs to be escaped - var source = '(?:' + _.keys(map).join('|') + ')'; - var testRegexp = RegExp(source); - var replaceRegexp = RegExp(source, 'g'); - return function(string) { - string = string == null ? '' : '' + string; - return testRegexp.test(string) ? string.replace(replaceRegexp, escaper) : string; - }; - }; - _.escape = createEscaper(escapeMap); - _.unescape = createEscaper(unescapeMap); - - // If the value of the named `property` is a function then invoke it with the - // `object` as context; otherwise, return it. - _.result = function(object, property, fallback) { - var value = object == null ? void 0 : object[property]; - if (value === void 0) { - value = fallback; - } - return _.isFunction(value) ? value.call(object) : value; - }; - - // Generate a unique integer id (unique within the entire client session). - // Useful for temporary DOM ids. - var idCounter = 0; - _.uniqueId = function(prefix) { - var id = ++idCounter + ''; - return prefix ? prefix + id : id; - }; - - // By default, Underscore uses ERB-style template delimiters, change the - // following template settings to use alternative delimiters. - _.templateSettings = { - evaluate : /<%([\s\S]+?)%>/g, - interpolate : /<%=([\s\S]+?)%>/g, - escape : /<%-([\s\S]+?)%>/g - }; - - // When customizing `templateSettings`, if you don't want to define an - // interpolation, evaluation or escaping regex, we need one that is - // guaranteed not to match. - var noMatch = /(.)^/; - - // Certain characters need to be escaped so that they can be put into a - // string literal. 
- var escapes = { - "'": "'", - '\\': '\\', - '\r': 'r', - '\n': 'n', - '\u2028': 'u2028', - '\u2029': 'u2029' - }; - - var escaper = /\\|'|\r|\n|\u2028|\u2029/g; - - var escapeChar = function(match) { - return '\\' + escapes[match]; - }; - - // JavaScript micro-templating, similar to John Resig's implementation. - // Underscore templating handles arbitrary delimiters, preserves whitespace, - // and correctly escapes quotes within interpolated code. - // NB: `oldSettings` only exists for backwards compatibility. - _.template = function(text, settings, oldSettings) { - if (!settings && oldSettings) settings = oldSettings; - settings = _.defaults({}, settings, _.templateSettings); - - // Combine delimiters into one regular expression via alternation. - var matcher = RegExp([ - (settings.escape || noMatch).source, - (settings.interpolate || noMatch).source, - (settings.evaluate || noMatch).source - ].join('|') + '|$', 'g'); - - // Compile the template source, escaping string literals appropriately. - var index = 0; - var source = "__p+='"; - text.replace(matcher, function(match, escape, interpolate, evaluate, offset) { - source += text.slice(index, offset).replace(escaper, escapeChar); - index = offset + match.length; - - if (escape) { - source += "'+\n((__t=(" + escape + "))==null?'':_.escape(__t))+\n'"; - } else if (interpolate) { - source += "'+\n((__t=(" + interpolate + "))==null?'':__t)+\n'"; - } else if (evaluate) { - source += "';\n" + evaluate + "\n__p+='"; - } - - // Adobe VMs need the match returned to produce the correct offest. - return match; - }); - source += "';\n"; - - // If a variable is not specified, place data values in local scope. 
- if (!settings.variable) source = 'with(obj||{}){\n' + source + '}\n'; - - source = "var __t,__p='',__j=Array.prototype.join," + - "print=function(){__p+=__j.call(arguments,'');};\n" + - source + 'return __p;\n'; - - try { - var render = new Function(settings.variable || 'obj', '_', source); - } catch (e) { - e.source = source; - throw e; - } - - var template = function(data) { - return render.call(this, data, _); - }; - - // Provide the compiled source as a convenience for precompilation. - var argument = settings.variable || 'obj'; - template.source = 'function(' + argument + '){\n' + source + '}'; - - return template; - }; - - // Add a "chain" function. Start chaining a wrapped Underscore object. - _.chain = function(obj) { - var instance = _(obj); - instance._chain = true; - return instance; - }; - - // OOP - // --------------- - // If Underscore is called as a function, it returns a wrapped object that - // can be used OO-style. This wrapper holds altered versions of all the - // underscore functions. Wrapped objects may be chained. - - // Helper function to continue chaining intermediate results. - var result = function(instance, obj) { - return instance._chain ? _(obj).chain() : obj; - }; - - // Add your own custom functions to the Underscore object. - _.mixin = function(obj) { - _.each(_.functions(obj), function(name) { - var func = _[name] = obj[name]; - _.prototype[name] = function() { - var args = [this._wrapped]; - push.apply(args, arguments); - return result(this, func.apply(_, args)); - }; - }); - }; - - // Add all of the Underscore functions to the wrapper object. - _.mixin(_); - - // Add all mutator Array functions to the wrapper. 
- _.each(['pop', 'push', 'reverse', 'shift', 'sort', 'splice', 'unshift'], function(name) { - var method = ArrayProto[name]; - _.prototype[name] = function() { - var obj = this._wrapped; - method.apply(obj, arguments); - if ((name === 'shift' || name === 'splice') && obj.length === 0) delete obj[0]; - return result(this, obj); - }; - }); - - // Add all accessor Array functions to the wrapper. - _.each(['concat', 'join', 'slice'], function(name) { - var method = ArrayProto[name]; - _.prototype[name] = function() { - return result(this, method.apply(this._wrapped, arguments)); - }; - }); - - // Extracts the result from a wrapped and chained object. - _.prototype.value = function() { - return this._wrapped; - }; - - // Provide unwrapping proxy for some methods used in engine operations - // such as arithmetic and JSON stringification. - _.prototype.valueOf = _.prototype.toJSON = _.prototype.value; - - _.prototype.toString = function() { - return '' + this._wrapped; - }; - - // AMD registration happens at the end for compatibility with AMD loaders - // that may not enforce next-turn semantics on modules. Even though general - // practice for AMD registration is to be anonymous, underscore registers - // as a named module because, like jQuery, it is a base library that is - // popular enough to be bundled in a third party lib, but not be part of - // an AMD load request. Those cases could generate an error when an - // anonymous define() is called outside of a loader request. 
- if (typeof define === 'function' && define.amd) { - define('underscore', [], function() { - return _; - }); - } -}.call(this)); - -},{}],26:[function(require,module,exports){ -arguments[4][19][0].apply(exports,arguments) -},{"dup":19}],27:[function(require,module,exports){ -module.exports = function isBuffer(arg) { - return arg && typeof arg === 'object' - && typeof arg.copy === 'function' - && typeof arg.fill === 'function' - && typeof arg.readUInt8 === 'function'; -} -},{}],28:[function(require,module,exports){ -(function (process,global){ -// Copyright Joyent, Inc. and other Node contributors. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to permit -// persons to whom the Software is furnished to do so, subject to the -// following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN -// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -// USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -var formatRegExp = /%[sdj%]/g; -exports.format = function(f) { - if (!isString(f)) { - var objects = []; - for (var i = 0; i < arguments.length; i++) { - objects.push(inspect(arguments[i])); - } - return objects.join(' '); - } - - var i = 1; - var args = arguments; - var len = args.length; - var str = String(f).replace(formatRegExp, function(x) { - if (x === '%%') return '%'; - if (i >= len) return x; - switch (x) { - case '%s': return String(args[i++]); - case '%d': return Number(args[i++]); - case '%j': - try { - return JSON.stringify(args[i++]); - } catch (_) { - return '[Circular]'; - } - default: - return x; - } - }); - for (var x = args[i]; i < len; x = args[++i]) { - if (isNull(x) || !isObject(x)) { - str += ' ' + x; - } else { - str += ' ' + inspect(x); - } - } - return str; -}; - - -// Mark that a method should not be used. -// Returns a modified function which warns once by default. -// If --no-deprecation is set, then it is a no-op. -exports.deprecate = function(fn, msg) { - // Allow for deprecating things in the process of starting up. 
- if (isUndefined(global.process)) { - return function() { - return exports.deprecate(fn, msg).apply(this, arguments); - }; - } - - if (process.noDeprecation === true) { - return fn; - } - - var warned = false; - function deprecated() { - if (!warned) { - if (process.throwDeprecation) { - throw new Error(msg); - } else if (process.traceDeprecation) { - console.trace(msg); - } else { - console.error(msg); - } - warned = true; - } - return fn.apply(this, arguments); - } - - return deprecated; -}; - - -var debugs = {}; -var debugEnviron; -exports.debuglog = function(set) { - if (isUndefined(debugEnviron)) - debugEnviron = process.env.NODE_DEBUG || ''; - set = set.toUpperCase(); - if (!debugs[set]) { - if (new RegExp('\\b' + set + '\\b', 'i').test(debugEnviron)) { - var pid = process.pid; - debugs[set] = function() { - var msg = exports.format.apply(exports, arguments); - console.error('%s %d: %s', set, pid, msg); - }; - } else { - debugs[set] = function() {}; - } - } - return debugs[set]; -}; - - -/** - * Echos the value of a value. Trys to print the value out - * in the best way possible given the different types. - * - * @param {Object} obj The object to print out. - * @param {Object} opts Optional options object that alters the output. - */ -/* legacy: obj, showHidden, depth, colors*/ -function inspect(obj, opts) { - // default options - var ctx = { - seen: [], - stylize: stylizeNoColor - }; - // legacy... - if (arguments.length >= 3) ctx.depth = arguments[2]; - if (arguments.length >= 4) ctx.colors = arguments[3]; - if (isBoolean(opts)) { - // legacy... 
- ctx.showHidden = opts; - } else if (opts) { - // got an "options" object - exports._extend(ctx, opts); - } - // set default options - if (isUndefined(ctx.showHidden)) ctx.showHidden = false; - if (isUndefined(ctx.depth)) ctx.depth = 2; - if (isUndefined(ctx.colors)) ctx.colors = false; - if (isUndefined(ctx.customInspect)) ctx.customInspect = true; - if (ctx.colors) ctx.stylize = stylizeWithColor; - return formatValue(ctx, obj, ctx.depth); -} -exports.inspect = inspect; - - -// http://en.wikipedia.org/wiki/ANSI_escape_code#graphics -inspect.colors = { - 'bold' : [1, 22], - 'italic' : [3, 23], - 'underline' : [4, 24], - 'inverse' : [7, 27], - 'white' : [37, 39], - 'grey' : [90, 39], - 'black' : [30, 39], - 'blue' : [34, 39], - 'cyan' : [36, 39], - 'green' : [32, 39], - 'magenta' : [35, 39], - 'red' : [31, 39], - 'yellow' : [33, 39] -}; - -// Don't use 'blue' not visible on cmd.exe -inspect.styles = { - 'special': 'cyan', - 'number': 'yellow', - 'boolean': 'yellow', - 'undefined': 'grey', - 'null': 'bold', - 'string': 'green', - 'date': 'magenta', - // "name": intentionally not styling - 'regexp': 'red' -}; - - -function stylizeWithColor(str, styleType) { - var style = inspect.styles[styleType]; - - if (style) { - return '\u001b[' + inspect.colors[style][0] + 'm' + str + - '\u001b[' + inspect.colors[style][1] + 'm'; - } else { - return str; - } -} - - -function stylizeNoColor(str, styleType) { - return str; -} - - -function arrayToHash(array) { - var hash = {}; - - array.forEach(function(val, idx) { - hash[val] = true; - }); - - return hash; -} - - -function formatValue(ctx, value, recurseTimes) { - // Provide a hook for user-specified inspect functions. - // Check that value is an object with an inspect function on it - if (ctx.customInspect && - value && - isFunction(value.inspect) && - // Filter out the util module, it's inspect function is special - value.inspect !== exports.inspect && - // Also filter out any prototype objects using the circular check. 
- !(value.constructor && value.constructor.prototype === value)) { - var ret = value.inspect(recurseTimes, ctx); - if (!isString(ret)) { - ret = formatValue(ctx, ret, recurseTimes); - } - return ret; - } - - // Primitive types cannot have properties - var primitive = formatPrimitive(ctx, value); - if (primitive) { - return primitive; - } - - // Look up the keys of the object. - var keys = Object.keys(value); - var visibleKeys = arrayToHash(keys); - - if (ctx.showHidden) { - keys = Object.getOwnPropertyNames(value); - } - - // IE doesn't make error fields non-enumerable - // http://msdn.microsoft.com/en-us/library/ie/dww52sbt(v=vs.94).aspx - if (isError(value) - && (keys.indexOf('message') >= 0 || keys.indexOf('description') >= 0)) { - return formatError(value); - } - - // Some type of object without properties can be shortcutted. - if (keys.length === 0) { - if (isFunction(value)) { - var name = value.name ? ': ' + value.name : ''; - return ctx.stylize('[Function' + name + ']', 'special'); - } - if (isRegExp(value)) { - return ctx.stylize(RegExp.prototype.toString.call(value), 'regexp'); - } - if (isDate(value)) { - return ctx.stylize(Date.prototype.toString.call(value), 'date'); - } - if (isError(value)) { - return formatError(value); - } - } - - var base = '', array = false, braces = ['{', '}']; - - // Make Array say that they are Array - if (isArray(value)) { - array = true; - braces = ['[', ']']; - } - - // Make functions say that they are functions - if (isFunction(value)) { - var n = value.name ? 
': ' + value.name : ''; - base = ' [Function' + n + ']'; - } - - // Make RegExps say that they are RegExps - if (isRegExp(value)) { - base = ' ' + RegExp.prototype.toString.call(value); - } - - // Make dates with properties first say the date - if (isDate(value)) { - base = ' ' + Date.prototype.toUTCString.call(value); - } - - // Make error with message first say the error - if (isError(value)) { - base = ' ' + formatError(value); - } - - if (keys.length === 0 && (!array || value.length == 0)) { - return braces[0] + base + braces[1]; - } - - if (recurseTimes < 0) { - if (isRegExp(value)) { - return ctx.stylize(RegExp.prototype.toString.call(value), 'regexp'); - } else { - return ctx.stylize('[Object]', 'special'); - } - } - - ctx.seen.push(value); - - var output; - if (array) { - output = formatArray(ctx, value, recurseTimes, visibleKeys, keys); - } else { - output = keys.map(function(key) { - return formatProperty(ctx, value, recurseTimes, visibleKeys, key, array); - }); - } - - ctx.seen.pop(); - - return reduceToSingleString(output, base, braces); -} - - -function formatPrimitive(ctx, value) { - if (isUndefined(value)) - return ctx.stylize('undefined', 'undefined'); - if (isString(value)) { - var simple = '\'' + JSON.stringify(value).replace(/^"|"$/g, '') - .replace(/'/g, "\\'") - .replace(/\\"/g, '"') + '\''; - return ctx.stylize(simple, 'string'); - } - if (isNumber(value)) - return ctx.stylize('' + value, 'number'); - if (isBoolean(value)) - return ctx.stylize('' + value, 'boolean'); - // For some reason typeof null is "object", so special case here. 
- if (isNull(value)) - return ctx.stylize('null', 'null'); -} - - -function formatError(value) { - return '[' + Error.prototype.toString.call(value) + ']'; -} - - -function formatArray(ctx, value, recurseTimes, visibleKeys, keys) { - var output = []; - for (var i = 0, l = value.length; i < l; ++i) { - if (hasOwnProperty(value, String(i))) { - output.push(formatProperty(ctx, value, recurseTimes, visibleKeys, - String(i), true)); - } else { - output.push(''); - } - } - keys.forEach(function(key) { - if (!key.match(/^\d+$/)) { - output.push(formatProperty(ctx, value, recurseTimes, visibleKeys, - key, true)); - } - }); - return output; -} - - -function formatProperty(ctx, value, recurseTimes, visibleKeys, key, array) { - var name, str, desc; - desc = Object.getOwnPropertyDescriptor(value, key) || { value: value[key] }; - if (desc.get) { - if (desc.set) { - str = ctx.stylize('[Getter/Setter]', 'special'); - } else { - str = ctx.stylize('[Getter]', 'special'); - } - } else { - if (desc.set) { - str = ctx.stylize('[Setter]', 'special'); - } - } - if (!hasOwnProperty(visibleKeys, key)) { - name = '[' + key + ']'; - } - if (!str) { - if (ctx.seen.indexOf(desc.value) < 0) { - if (isNull(recurseTimes)) { - str = formatValue(ctx, desc.value, null); - } else { - str = formatValue(ctx, desc.value, recurseTimes - 1); - } - if (str.indexOf('\n') > -1) { - if (array) { - str = str.split('\n').map(function(line) { - return ' ' + line; - }).join('\n').substr(2); - } else { - str = '\n' + str.split('\n').map(function(line) { - return ' ' + line; - }).join('\n'); - } - } - } else { - str = ctx.stylize('[Circular]', 'special'); - } - } - if (isUndefined(name)) { - if (array && key.match(/^\d+$/)) { - return str; - } - name = JSON.stringify('' + key); - if (name.match(/^"([a-zA-Z_][a-zA-Z_0-9]*)"$/)) { - name = name.substr(1, name.length - 2); - name = ctx.stylize(name, 'name'); - } else { - name = name.replace(/'/g, "\\'") - .replace(/\\"/g, '"') - .replace(/(^"|"$)/g, "'"); - name = 
ctx.stylize(name, 'string'); - } - } - - return name + ': ' + str; -} - - -function reduceToSingleString(output, base, braces) { - var numLinesEst = 0; - var length = output.reduce(function(prev, cur) { - numLinesEst++; - if (cur.indexOf('\n') >= 0) numLinesEst++; - return prev + cur.replace(/\u001b\[\d\d?m/g, '').length + 1; - }, 0); - - if (length > 60) { - return braces[0] + - (base === '' ? '' : base + '\n ') + - ' ' + - output.join(',\n ') + - ' ' + - braces[1]; - } - - return braces[0] + base + ' ' + output.join(', ') + ' ' + braces[1]; -} - - -// NOTE: These type checking functions intentionally don't use `instanceof` -// because it is fragile and can be easily faked with `Object.create()`. -function isArray(ar) { - return Array.isArray(ar); -} -exports.isArray = isArray; - -function isBoolean(arg) { - return typeof arg === 'boolean'; -} -exports.isBoolean = isBoolean; - -function isNull(arg) { - return arg === null; -} -exports.isNull = isNull; - -function isNullOrUndefined(arg) { - return arg == null; -} -exports.isNullOrUndefined = isNullOrUndefined; - -function isNumber(arg) { - return typeof arg === 'number'; -} -exports.isNumber = isNumber; - -function isString(arg) { - return typeof arg === 'string'; -} -exports.isString = isString; - -function isSymbol(arg) { - return typeof arg === 'symbol'; -} -exports.isSymbol = isSymbol; - -function isUndefined(arg) { - return arg === void 0; -} -exports.isUndefined = isUndefined; - -function isRegExp(re) { - return isObject(re) && objectToString(re) === '[object RegExp]'; -} -exports.isRegExp = isRegExp; - -function isObject(arg) { - return typeof arg === 'object' && arg !== null; -} -exports.isObject = isObject; - -function isDate(d) { - return isObject(d) && objectToString(d) === '[object Date]'; -} -exports.isDate = isDate; - -function isError(e) { - return isObject(e) && - (objectToString(e) === '[object Error]' || e instanceof Error); -} -exports.isError = isError; - -function isFunction(arg) { - return 
typeof arg === 'function'; -} -exports.isFunction = isFunction; - -function isPrimitive(arg) { - return arg === null || - typeof arg === 'boolean' || - typeof arg === 'number' || - typeof arg === 'string' || - typeof arg === 'symbol' || // ES6 symbol - typeof arg === 'undefined'; -} -exports.isPrimitive = isPrimitive; - -exports.isBuffer = require('./support/isBuffer'); - -function objectToString(o) { - return Object.prototype.toString.call(o); -} - - -function pad(n) { - return n < 10 ? '0' + n.toString(10) : n.toString(10); -} - - -var months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', - 'Oct', 'Nov', 'Dec']; - -// 26 Feb 16:19:34 -function timestamp() { - var d = new Date(); - var time = [pad(d.getHours()), - pad(d.getMinutes()), - pad(d.getSeconds())].join(':'); - return [d.getDate(), months[d.getMonth()], time].join(' '); -} - - -// log is just a thin wrapper to console.log that prepends a timestamp -exports.log = function() { - console.log('%s - %s', timestamp(), exports.format.apply(exports, arguments)); -}; - - -/** - * Inherit the prototype methods from one constructor into another. - * - * The Function.prototype.inherits from lang.js rewritten as a standalone - * function (not on Function.prototype). NOTE: If this file is to be loaded - * during bootstrapping this function needs to be rewritten using some native - * functions as prototype setup using normal JavaScript does not work as - * expected during bootstrapping (see mirror.js in r114903). - * - * @param {function} ctor Constructor function which needs to inherit the - * prototype. - * @param {function} superCtor Constructor function to inherit prototype from. 
- */ -exports.inherits = require('inherits'); - -exports._extend = function(origin, add) { - // Don't do anything if add isn't an object - if (!add || !isObject(add)) return origin; - - var keys = Object.keys(add); - var i = keys.length; - while (i--) { - origin[keys[i]] = add[keys[i]]; - } - return origin; -}; - -function hasOwnProperty(obj, prop) { - return Object.prototype.hasOwnProperty.call(obj, prop); -} - -}).call(this,require('_process'),typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {}) -},{"./support/isBuffer":27,"_process":24,"inherits":26}],29:[function(require,module,exports){ -// Returns a wrapper function that returns a wrapped callback -// The wrapper function should do some stuff, and return a -// presumably different callback function. -// This makes sure that own properties are retained, so that -// decorations and such are not lost along the way. -module.exports = wrappy -function wrappy (fn, cb) { - if (fn && cb) return wrappy(fn)(cb) - - if (typeof fn !== 'function') - throw new TypeError('need wrapper function') - - Object.keys(fn).forEach(function (k) { - wrapper[k] = fn[k] - }) - - return wrapper - - function wrapper() { - var args = new Array(arguments.length) - for (var i = 0; i < args.length; i++) { - args[i] = arguments[i] - } - var ret = fn.apply(this, args) - var cb = args[args.length-1] - if (typeof ret === 'function' && ret !== cb) { - Object.keys(cb).forEach(function (k) { - ret[k] = cb[k] - }) - } - return ret - } -} - -},{}]},{},[7])(7) -}); \ No newline at end of file diff --git a/assets/javascripts/workers/search.22074ed6.min.js b/assets/javascripts/workers/search.22074ed6.min.js deleted file mode 100644 index 1134cf8baa6a..000000000000 --- a/assets/javascripts/workers/search.22074ed6.min.js +++ /dev/null @@ -1,48 +0,0 @@ -(()=>{var de=Object.create;var B=Object.defineProperty;var ge=Object.getOwnPropertyDescriptor;var 
ye=Object.getOwnPropertyNames,Y=Object.getOwnPropertySymbols,me=Object.getPrototypeOf,G=Object.prototype.hasOwnProperty,ve=Object.prototype.propertyIsEnumerable;var J=(t,e,r)=>e in t?B(t,e,{enumerable:!0,configurable:!0,writable:!0,value:r}):t[e]=r,X=(t,e)=>{for(var r in e||(e={}))G.call(e,r)&&J(t,r,e[r]);if(Y)for(var r of Y(e))ve.call(e,r)&&J(t,r,e[r]);return t};var xe=t=>B(t,"__esModule",{value:!0});var Z=(t,e)=>()=>(e||t((e={exports:{}}).exports,e),e.exports);var Se=(t,e,r,n)=>{if(e&&typeof e=="object"||typeof e=="function")for(let i of ye(e))!G.call(t,i)&&(r||i!=="default")&&B(t,i,{get:()=>e[i],enumerable:!(n=ge(e,i))||n.enumerable});return t},U=(t,e)=>Se(xe(B(t!=null?de(me(t)):{},"default",!e&&t&&t.__esModule?{get:()=>t.default,enumerable:!0}:{value:t,enumerable:!0})),t);var z=(t,e,r)=>new Promise((n,i)=>{var s=u=>{try{a(r.next(u))}catch(c){i(c)}},o=u=>{try{a(r.throw(u))}catch(c){i(c)}},a=u=>u.done?n(u.value):Promise.resolve(u.value).then(s,o);a((r=r.apply(t,e)).next())});var te=Z((K,ee)=>{/** - * lunr - http://lunrjs.com - A bit like Solr, but much smaller and not as bright - 2.3.9 - * Copyright (C) 2020 Oliver Nightingale - * @license MIT - */(function(){var t=function(e){var r=new t.Builder;return r.pipeline.add(t.trimmer,t.stopWordFilter,t.stemmer),r.searchPipeline.add(t.stemmer),e.call(r,r),r.build()};t.version="2.3.9";/*! - * lunr.utils - * Copyright (C) 2020 Oliver Nightingale - */t.utils={},t.utils.warn=function(e){return function(r){e.console&&console.warn&&console.warn(r)}}(this),t.utils.asString=function(e){return e==null?"":e.toString()},t.utils.clone=function(e){if(e==null)return e;for(var r=Object.create(null),n=Object.keys(e),i=0;i0){var h=t.utils.clone(r)||{};h.position=[a,c],h.index=s.length,s.push(new t.Token(n.slice(a,o),h))}a=o+1}}return s},t.tokenizer.separator=/[\s\-]+/;/*! 
- * lunr.Pipeline - * Copyright (C) 2020 Oliver Nightingale - */t.Pipeline=function(){this._stack=[]},t.Pipeline.registeredFunctions=Object.create(null),t.Pipeline.registerFunction=function(e,r){r in this.registeredFunctions&&t.utils.warn("Overwriting existing registered function: "+r),e.label=r,t.Pipeline.registeredFunctions[e.label]=e},t.Pipeline.warnIfFunctionNotRegistered=function(e){var r=e.label&&e.label in this.registeredFunctions;r||t.utils.warn(`Function is not registered with pipeline. This may cause problems when serialising the index. -`,e)},t.Pipeline.load=function(e){var r=new t.Pipeline;return e.forEach(function(n){var i=t.Pipeline.registeredFunctions[n];if(i)r.add(i);else throw new Error("Cannot load unregistered function: "+n)}),r},t.Pipeline.prototype.add=function(){var e=Array.prototype.slice.call(arguments);e.forEach(function(r){t.Pipeline.warnIfFunctionNotRegistered(r),this._stack.push(r)},this)},t.Pipeline.prototype.after=function(e,r){t.Pipeline.warnIfFunctionNotRegistered(r);var n=this._stack.indexOf(e);if(n==-1)throw new Error("Cannot find existingFn");n=n+1,this._stack.splice(n,0,r)},t.Pipeline.prototype.before=function(e,r){t.Pipeline.warnIfFunctionNotRegistered(r);var n=this._stack.indexOf(e);if(n==-1)throw new Error("Cannot find existingFn");this._stack.splice(n,0,r)},t.Pipeline.prototype.remove=function(e){var r=this._stack.indexOf(e);r!=-1&&this._stack.splice(r,1)},t.Pipeline.prototype.run=function(e){for(var r=this._stack.length,n=0;n1&&(oe&&(n=s),o!=e);)i=n-r,s=r+Math.floor(i/2),o=this.elements[s*2];if(o==e||o>e)return s*2;if(ou?h+=2:a==u&&(r+=n[c+1]*i[h+1],c+=2,h+=2);return r},t.Vector.prototype.similarity=function(e){return this.dot(e)/this.magnitude()||0},t.Vector.prototype.toArray=function(){for(var e=new Array(this.elements.length/2),r=1,n=0;r0){var o=s.str.charAt(0),a;o in s.node.edges?a=s.node.edges[o]:(a=new 
t.TokenSet,s.node.edges[o]=a),s.str.length==1&&(a.final=!0),i.push({node:a,editsRemaining:s.editsRemaining,str:s.str.slice(1)})}if(s.editsRemaining!=0){if("*"in s.node.edges)var u=s.node.edges["*"];else{var u=new t.TokenSet;s.node.edges["*"]=u}if(s.str.length==0&&(u.final=!0),i.push({node:u,editsRemaining:s.editsRemaining-1,str:s.str}),s.str.length>1&&i.push({node:s.node,editsRemaining:s.editsRemaining-1,str:s.str.slice(1)}),s.str.length==1&&(s.node.final=!0),s.str.length>=1){if("*"in s.node.edges)var c=s.node.edges["*"];else{var c=new t.TokenSet;s.node.edges["*"]=c}s.str.length==1&&(c.final=!0),i.push({node:c,editsRemaining:s.editsRemaining-1,str:s.str.slice(1)})}if(s.str.length>1){var h=s.str.charAt(0),y=s.str.charAt(1),g;y in s.node.edges?g=s.node.edges[y]:(g=new t.TokenSet,s.node.edges[y]=g),s.str.length==1&&(g.final=!0),i.push({node:g,editsRemaining:s.editsRemaining-1,str:h+s.str.slice(2)})}}}return n},t.TokenSet.fromString=function(e){for(var r=new t.TokenSet,n=r,i=0,s=e.length;i=e;r--){var n=this.uncheckedNodes[r],i=n.child.toString();i in this.minimizedNodes?n.parent.edges[n.char]=this.minimizedNodes[i]:(n.child._str=i,this.minimizedNodes[i]=n.child),this.uncheckedNodes.pop()}};/*! 
- * lunr.Index - * Copyright (C) 2020 Oliver Nightingale - */t.Index=function(e){this.invertedIndex=e.invertedIndex,this.fieldVectors=e.fieldVectors,this.tokenSet=e.tokenSet,this.fields=e.fields,this.pipeline=e.pipeline},t.Index.prototype.search=function(e){return this.query(function(r){var n=new t.QueryParser(e,r);n.parse()})},t.Index.prototype.query=function(e){for(var r=new t.Query(this.fields),n=Object.create(null),i=Object.create(null),s=Object.create(null),o=Object.create(null),a=Object.create(null),u=0;u1?this._b=1:this._b=e},t.Builder.prototype.k1=function(e){this._k1=e},t.Builder.prototype.add=function(e,r){var n=e[this._ref],i=Object.keys(this._fields);this._documents[n]=r||{},this.documentCount+=1;for(var s=0;s=this.length)return t.QueryLexer.EOS;var e=this.str.charAt(this.pos);return this.pos+=1,e},t.QueryLexer.prototype.width=function(){return this.pos-this.start},t.QueryLexer.prototype.ignore=function(){this.start==this.pos&&(this.pos+=1),this.start=this.pos},t.QueryLexer.prototype.backup=function(){this.pos-=1},t.QueryLexer.prototype.acceptDigitRun=function(){var e,r;do e=this.next(),r=e.charCodeAt(0);while(r>47&&r<58);e!=t.QueryLexer.EOS&&this.backup()},t.QueryLexer.prototype.more=function(){return this.pos1&&(e.backup(),e.emit(t.QueryLexer.TERM)),e.ignore(),e.more())return t.QueryLexer.lexText},t.QueryLexer.lexEditDistance=function(e){return e.ignore(),e.acceptDigitRun(),e.emit(t.QueryLexer.EDIT_DISTANCE),t.QueryLexer.lexText},t.QueryLexer.lexBoost=function(e){return e.ignore(),e.acceptDigitRun(),e.emit(t.QueryLexer.BOOST),t.QueryLexer.lexText},t.QueryLexer.lexEOS=function(e){e.width()>0&&e.emit(t.QueryLexer.TERM)},t.QueryLexer.termSeparator=t.tokenizer.separator,t.QueryLexer.lexText=function(e){for(;;){var r=e.next();if(r==t.QueryLexer.EOS)return t.QueryLexer.lexEOS;if(r.charCodeAt(0)==92){e.escapeCharacter();continue}if(r==":")return t.QueryLexer.lexField;if(r=="~")return 
e.backup(),e.width()>0&&e.emit(t.QueryLexer.TERM),t.QueryLexer.lexEditDistance;if(r=="^")return e.backup(),e.width()>0&&e.emit(t.QueryLexer.TERM),t.QueryLexer.lexBoost;if(r=="+"&&e.width()===1||r=="-"&&e.width()===1)return e.emit(t.QueryLexer.PRESENCE),t.QueryLexer.lexText;if(r.match(t.QueryLexer.termSeparator))return t.QueryLexer.lexTerm}},t.QueryParser=function(e,r){this.lexer=new t.QueryLexer(e),this.query=r,this.currentClause={},this.lexemeIdx=0},t.QueryParser.prototype.parse=function(){this.lexer.run(),this.lexemes=this.lexer.lexemes;for(var e=t.QueryParser.parseClause;e;)e=e(this);return this.query},t.QueryParser.prototype.peekLexeme=function(){return this.lexemes[this.lexemeIdx]},t.QueryParser.prototype.consumeLexeme=function(){var e=this.peekLexeme();return this.lexemeIdx+=1,e},t.QueryParser.prototype.nextClause=function(){var e=this.currentClause;this.query.clause(e),this.currentClause={}},t.QueryParser.parseClause=function(e){var r=e.peekLexeme();if(r!=null)switch(r.type){case t.QueryLexer.PRESENCE:return t.QueryParser.parsePresence;case t.QueryLexer.FIELD:return t.QueryParser.parseField;case t.QueryLexer.TERM:return t.QueryParser.parseTerm;default:var n="expected either a field or a term, found "+r.type;throw r.str.length>=1&&(n+=" with value '"+r.str+"'"),new t.QueryParseError(n,r.start,r.end)}},t.QueryParser.parsePresence=function(e){var r=e.consumeLexeme();if(r!=null){switch(r.str){case"-":e.currentClause.presence=t.Query.presence.PROHIBITED;break;case"+":e.currentClause.presence=t.Query.presence.REQUIRED;break;default:var n="unrecognised presence operator'"+r.str+"'";throw new t.QueryParseError(n,r.start,r.end)}var i=e.peekLexeme();if(i==null){var n="expecting term or field, found nothing";throw new t.QueryParseError(n,r.start,r.end)}switch(i.type){case t.QueryLexer.FIELD:return t.QueryParser.parseField;case t.QueryLexer.TERM:return t.QueryParser.parseTerm;default:var n="expecting term or field, found '"+i.type+"'";throw new 
t.QueryParseError(n,i.start,i.end)}}},t.QueryParser.parseField=function(e){var r=e.consumeLexeme();if(r!=null){if(e.query.allFields.indexOf(r.str)==-1){var n=e.query.allFields.map(function(o){return"'"+o+"'"}).join(", "),i="unrecognised field '"+r.str+"', possible fields: "+n;throw new t.QueryParseError(i,r.start,r.end)}e.currentClause.fields=[r.str];var s=e.peekLexeme();if(s==null){var i="expecting term, found nothing";throw new t.QueryParseError(i,r.start,r.end)}switch(s.type){case t.QueryLexer.TERM:return t.QueryParser.parseTerm;default:var i="expecting term, found '"+s.type+"'";throw new t.QueryParseError(i,s.start,s.end)}}},t.QueryParser.parseTerm=function(e){var r=e.consumeLexeme();if(r!=null){e.currentClause.term=r.str.toLowerCase(),r.str.indexOf("*")!=-1&&(e.currentClause.usePipeline=!1);var n=e.peekLexeme();if(n==null){e.nextClause();return}switch(n.type){case t.QueryLexer.TERM:return e.nextClause(),t.QueryParser.parseTerm;case t.QueryLexer.FIELD:return e.nextClause(),t.QueryParser.parseField;case t.QueryLexer.EDIT_DISTANCE:return t.QueryParser.parseEditDistance;case t.QueryLexer.BOOST:return t.QueryParser.parseBoost;case t.QueryLexer.PRESENCE:return e.nextClause(),t.QueryParser.parsePresence;default:var i="Unexpected lexeme type '"+n.type+"'";throw new t.QueryParseError(i,n.start,n.end)}}},t.QueryParser.parseEditDistance=function(e){var r=e.consumeLexeme();if(r!=null){var n=parseInt(r.str,10);if(isNaN(n)){var i="edit distance must be numeric";throw new t.QueryParseError(i,r.start,r.end)}e.currentClause.editDistance=n;var s=e.peekLexeme();if(s==null){e.nextClause();return}switch(s.type){case t.QueryLexer.TERM:return e.nextClause(),t.QueryParser.parseTerm;case t.QueryLexer.FIELD:return e.nextClause(),t.QueryParser.parseField;case t.QueryLexer.EDIT_DISTANCE:return t.QueryParser.parseEditDistance;case t.QueryLexer.BOOST:return t.QueryParser.parseBoost;case t.QueryLexer.PRESENCE:return e.nextClause(),t.QueryParser.parsePresence;default:var i="Unexpected lexeme 
type '"+s.type+"'";throw new t.QueryParseError(i,s.start,s.end)}}},t.QueryParser.parseBoost=function(e){var r=e.consumeLexeme();if(r!=null){var n=parseInt(r.str,10);if(isNaN(n)){var i="boost must be numeric";throw new t.QueryParseError(i,r.start,r.end)}e.currentClause.boost=n;var s=e.peekLexeme();if(s==null){e.nextClause();return}switch(s.type){case t.QueryLexer.TERM:return e.nextClause(),t.QueryParser.parseTerm;case t.QueryLexer.FIELD:return e.nextClause(),t.QueryParser.parseField;case t.QueryLexer.EDIT_DISTANCE:return t.QueryParser.parseEditDistance;case t.QueryLexer.BOOST:return t.QueryParser.parseBoost;case t.QueryLexer.PRESENCE:return e.nextClause(),t.QueryParser.parsePresence;default:var i="Unexpected lexeme type '"+s.type+"'";throw new t.QueryParseError(i,s.start,s.end)}}},function(e,r){typeof define=="function"&&define.amd?define(r):typeof K=="object"?ee.exports=r():e.lunr=r()}(this,function(){return t})})()});var W=Z((Te,re)=>{"use strict";/*! - * escape-html - * Copyright(c) 2012-2013 TJ Holowaychuk - * Copyright(c) 2015 Andreas Lubbe - * Copyright(c) 2015 Tiancheng "Timothy" Gu - * MIT Licensed - */var Qe=/["'&<>]/;re.exports=be;function be(t){var e=""+t,r=Qe.exec(e);if(!r)return e;var n,i="",s=0,o=0;for(s=r.index;s=0;r--){let n=t[r];typeof n!="object"?n=document.createTextNode(n):n.parentNode&&n.parentNode.removeChild(n),r?e.insertBefore(this.previousSibling,n):e.replaceChild(n,this)}}}));var ne=U(W());function ie(t){let e=new Map,r=new Set;for(let n of t){let[i,s]=n.location.split("#"),o=n.location,a=n.title,u=(0,ne.default)(n.text).replace(/\s+(?=[,.:;!?])/g,"").replace(/\s+/g," ");if(s){let c=e.get(i);r.has(c)?e.set(o,{location:o,title:a,text:u,parent:c}):(c.title=n.title,c.text=u,r.add(c))}else e.set(o,{location:o,title:a,text:u})}return e}var se=U(W());function oe(t,e){let r=new RegExp(t.separator,"img"),n=(i,s,o)=>`${s}${o}`;return i=>{i=i.replace(/[\s*+\-:~^]+/g," ").trim();let s=new 
RegExp(`(^|${t.separator})(${i.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return o=>(e?(0,se.default)(o):o).replace(s,n).replace(/<\/mark>(\s+)]*>/img,"$1")}}function ae(t){let e=new lunr.Query(["title","text"]);return new lunr.QueryParser(t,e).parse(),e.clauses}function ue(t,e){var i;let r=new Set(t),n={};for(let s=0;s!n.has(i)))]}var H=class{constructor({config:e,docs:r,options:n}){this.options=n,this.documents=ie(r),this.highlight=oe(e,!1),lunr.tokenizer.separator=new RegExp(e.separator),this.index=lunr(function(){e.lang.length===1&&e.lang[0]!=="en"?this.use(lunr[e.lang[0]]):e.lang.length>1&&this.use(lunr.multiLanguage(...e.lang));let i=Le(["trimmer","stopWordFilter","stemmer"],n.pipeline);for(let s of e.lang.map(o=>o==="en"?lunr:lunr[o]))for(let o of i)this.pipeline.remove(s[o]),this.searchPipeline.remove(s[o]);this.ref("location"),this.field("title",{boost:1e3}),this.field("text");for(let s of r)this.add(s)})}search(e){if(e)try{let r=this.highlight(e),n=ae(e).filter(o=>o.presence!==lunr.Query.presence.PROHIBITED),i=this.index.search(`${e}*`).reduce((o,{ref:a,score:u,matchData:c})=>{let h=this.documents.get(a);if(typeof h!="undefined"){let{location:y,title:g,text:b,parent:v}=h,Q=ue(n,Object.keys(c.metadata)),p=+!v+ +Object.values(Q).every(d=>d);o.push({location:y,title:r(g),text:r(b),score:u*(1+p),terms:Q})}return o},[]).sort((o,a)=>a.score-o.score).reduce((o,a)=>{let u=this.documents.get(a.location);if(typeof u!="undefined"){let c="parent"in u?u.parent.location:u.location;o.set(c,[...o.get(c)||[],a])}return o},new Map),s;if(this.options.suggestions){let o=this.index.query(a=>{for(let u of n)a.term(u.term,{fields:["title"],presence:lunr.Query.presence.REQUIRED,wildcard:lunr.Query.wildcard.TRAILING})});s=o.length?Object.keys(o[0].matchData.metadata):[]}return X({items:[...i.values()]},typeof s!="undefined"&&{suggestions:s})}catch(r){console.warn(`Invalid query: ${e} \u2013 see https://bit.ly/2s3ChXG`)}return{items:[]}}};var q;function 
we(t){return z(this,null,function*(){let e="../lunr";if(typeof parent!="undefined"&&"IFrameWorker"in parent){let n=document.querySelector("script[src]"),[i]=n.src.split("/worker");e=e.replace("..",i)}let r=[];for(let n of t.lang){switch(n){case"ja":r.push(`${e}/tinyseg.js`);break;case"hi":case"th":r.push(`${e}/wordcut.js`);break}n!=="en"&&r.push(`${e}/min/lunr.${n}.min.js`)}t.lang.length>1&&r.push(`${e}/min/lunr.multi.min.js`),r.length&&(yield importScripts(`${e}/min/lunr.stemmer.support.min.js`,...r))})}function Ee(t){return z(this,null,function*(){switch(t.type){case 0:return yield we(t.data.config),q=new H(t.data),{type:1};case 2:return{type:3,data:q?q.search(t.data):{items:[]}};default:throw new TypeError("Invalid message type")}})}self.lunr=ce.default;addEventListener("message",t=>z(void 0,null,function*(){postMessage(yield Ee(t.data))}));})(); -//# sourceMappingURL=search.22074ed6.min.js.map - diff --git a/assets/javascripts/workers/search.22074ed6.min.js.map b/assets/javascripts/workers/search.22074ed6.min.js.map deleted file mode 100644 index 0c9df50f0a39..000000000000 --- a/assets/javascripts/workers/search.22074ed6.min.js.map +++ /dev/null @@ -1,8 +0,0 @@ -{ - "version": 3, - "sources": ["node_modules/lunr/lunr.js", "node_modules/escape-html/index.js", "src/assets/javascripts/integrations/search/worker/main/index.ts", "src/assets/javascripts/polyfills/index.ts", "src/assets/javascripts/integrations/search/document/index.ts", "src/assets/javascripts/integrations/search/highlighter/index.ts", "src/assets/javascripts/integrations/search/query/_/index.ts", "src/assets/javascripts/integrations/search/_/index.ts"], - "sourceRoot": "../../../..", - "sourcesContent": ["/**\n * lunr - http://lunrjs.com - A bit like Solr, but much smaller and not as bright - 2.3.9\n * Copyright (C) 2020 Oliver Nightingale\n * @license MIT\n */\n\n;(function(){\n\n/**\n * A convenience function for configuring and constructing\n * a new lunr Index.\n *\n * A lunr.Builder instance is 
created and the pipeline setup\n * with a trimmer, stop word filter and stemmer.\n *\n * This builder object is yielded to the configuration function\n * that is passed as a parameter, allowing the list of fields\n * and other builder parameters to be customised.\n *\n * All documents _must_ be added within the passed config function.\n *\n * @example\n * var idx = lunr(function () {\n * this.field('title')\n * this.field('body')\n * this.ref('id')\n *\n * documents.forEach(function (doc) {\n * this.add(doc)\n * }, this)\n * })\n *\n * @see {@link lunr.Builder}\n * @see {@link lunr.Pipeline}\n * @see {@link lunr.trimmer}\n * @see {@link lunr.stopWordFilter}\n * @see {@link lunr.stemmer}\n * @namespace {function} lunr\n */\nvar lunr = function (config) {\n var builder = new lunr.Builder\n\n builder.pipeline.add(\n lunr.trimmer,\n lunr.stopWordFilter,\n lunr.stemmer\n )\n\n builder.searchPipeline.add(\n lunr.stemmer\n )\n\n config.call(builder, builder)\n return builder.build()\n}\n\nlunr.version = \"2.3.9\"\n/*!\n * lunr.utils\n * Copyright (C) 2020 Oliver Nightingale\n */\n\n/**\n * A namespace containing utils for the rest of the lunr library\n * @namespace lunr.utils\n */\nlunr.utils = {}\n\n/**\n * Print a warning message to the console.\n *\n * @param {String} message The message to be printed.\n * @memberOf lunr.utils\n * @function\n */\nlunr.utils.warn = (function (global) {\n /* eslint-disable no-console */\n return function (message) {\n if (global.console && console.warn) {\n console.warn(message)\n }\n }\n /* eslint-enable no-console */\n})(this)\n\n/**\n * Convert an object to a string.\n *\n * In the case of `null` and `undefined` the function returns\n * the empty string, in all other cases the result of calling\n * `toString` on the passed object is returned.\n *\n * @param {Any} obj The object to convert to a string.\n * @return {String} string representation of the passed object.\n * @memberOf lunr.utils\n */\nlunr.utils.asString = function (obj) 
{\n if (obj === void 0 || obj === null) {\n return \"\"\n } else {\n return obj.toString()\n }\n}\n\n/**\n * Clones an object.\n *\n * Will create a copy of an existing object such that any mutations\n * on the copy cannot affect the original.\n *\n * Only shallow objects are supported, passing a nested object to this\n * function will cause a TypeError.\n *\n * Objects with primitives, and arrays of primitives are supported.\n *\n * @param {Object} obj The object to clone.\n * @return {Object} a clone of the passed object.\n * @throws {TypeError} when a nested object is passed.\n * @memberOf Utils\n */\nlunr.utils.clone = function (obj) {\n if (obj === null || obj === undefined) {\n return obj\n }\n\n var clone = Object.create(null),\n keys = Object.keys(obj)\n\n for (var i = 0; i < keys.length; i++) {\n var key = keys[i],\n val = obj[key]\n\n if (Array.isArray(val)) {\n clone[key] = val.slice()\n continue\n }\n\n if (typeof val === 'string' ||\n typeof val === 'number' ||\n typeof val === 'boolean') {\n clone[key] = val\n continue\n }\n\n throw new TypeError(\"clone is not deep and does not support nested objects\")\n }\n\n return clone\n}\nlunr.FieldRef = function (docRef, fieldName, stringValue) {\n this.docRef = docRef\n this.fieldName = fieldName\n this._stringValue = stringValue\n}\n\nlunr.FieldRef.joiner = \"/\"\n\nlunr.FieldRef.fromString = function (s) {\n var n = s.indexOf(lunr.FieldRef.joiner)\n\n if (n === -1) {\n throw \"malformed field ref string\"\n }\n\n var fieldRef = s.slice(0, n),\n docRef = s.slice(n + 1)\n\n return new lunr.FieldRef (docRef, fieldRef, s)\n}\n\nlunr.FieldRef.prototype.toString = function () {\n if (this._stringValue == undefined) {\n this._stringValue = this.fieldName + lunr.FieldRef.joiner + this.docRef\n }\n\n return this._stringValue\n}\n/*!\n * lunr.Set\n * Copyright (C) 2020 Oliver Nightingale\n */\n\n/**\n * A lunr set.\n *\n * @constructor\n */\nlunr.Set = function (elements) {\n this.elements = Object.create(null)\n\n 
if (elements) {\n this.length = elements.length\n\n for (var i = 0; i < this.length; i++) {\n this.elements[elements[i]] = true\n }\n } else {\n this.length = 0\n }\n}\n\n/**\n * A complete set that contains all elements.\n *\n * @static\n * @readonly\n * @type {lunr.Set}\n */\nlunr.Set.complete = {\n intersect: function (other) {\n return other\n },\n\n union: function () {\n return this\n },\n\n contains: function () {\n return true\n }\n}\n\n/**\n * An empty set that contains no elements.\n *\n * @static\n * @readonly\n * @type {lunr.Set}\n */\nlunr.Set.empty = {\n intersect: function () {\n return this\n },\n\n union: function (other) {\n return other\n },\n\n contains: function () {\n return false\n }\n}\n\n/**\n * Returns true if this set contains the specified object.\n *\n * @param {object} object - Object whose presence in this set is to be tested.\n * @returns {boolean} - True if this set contains the specified object.\n */\nlunr.Set.prototype.contains = function (object) {\n return !!this.elements[object]\n}\n\n/**\n * Returns a new set containing only the elements that are present in both\n * this set and the specified set.\n *\n * @param {lunr.Set} other - set to intersect with this set.\n * @returns {lunr.Set} a new set that is the intersection of this and the specified set.\n */\n\nlunr.Set.prototype.intersect = function (other) {\n var a, b, elements, intersection = []\n\n if (other === lunr.Set.complete) {\n return this\n }\n\n if (other === lunr.Set.empty) {\n return other\n }\n\n if (this.length < other.length) {\n a = this\n b = other\n } else {\n a = other\n b = this\n }\n\n elements = Object.keys(a.elements)\n\n for (var i = 0; i < elements.length; i++) {\n var element = elements[i]\n if (element in b.elements) {\n intersection.push(element)\n }\n }\n\n return new lunr.Set (intersection)\n}\n\n/**\n * Returns a new set combining the elements of this and the specified set.\n *\n * @param {lunr.Set} other - set to union with this set.\n * 
@return {lunr.Set} a new set that is the union of this and the specified set.\n */\n\nlunr.Set.prototype.union = function (other) {\n if (other === lunr.Set.complete) {\n return lunr.Set.complete\n }\n\n if (other === lunr.Set.empty) {\n return this\n }\n\n return new lunr.Set(Object.keys(this.elements).concat(Object.keys(other.elements)))\n}\n/**\n * A function to calculate the inverse document frequency for\n * a posting. This is shared between the builder and the index\n *\n * @private\n * @param {object} posting - The posting for a given term\n * @param {number} documentCount - The total number of documents.\n */\nlunr.idf = function (posting, documentCount) {\n var documentsWithTerm = 0\n\n for (var fieldName in posting) {\n if (fieldName == '_index') continue // Ignore the term index, its not a field\n documentsWithTerm += Object.keys(posting[fieldName]).length\n }\n\n var x = (documentCount - documentsWithTerm + 0.5) / (documentsWithTerm + 0.5)\n\n return Math.log(1 + Math.abs(x))\n}\n\n/**\n * A token wraps a string representation of a token\n * as it is passed through the text processing pipeline.\n *\n * @constructor\n * @param {string} [str=''] - The string token being wrapped.\n * @param {object} [metadata={}] - Metadata associated with this token.\n */\nlunr.Token = function (str, metadata) {\n this.str = str || \"\"\n this.metadata = metadata || {}\n}\n\n/**\n * Returns the token string that is being wrapped by this object.\n *\n * @returns {string}\n */\nlunr.Token.prototype.toString = function () {\n return this.str\n}\n\n/**\n * A token update function is used when updating or optionally\n * when cloning a token.\n *\n * @callback lunr.Token~updateFunction\n * @param {string} str - The string representation of the token.\n * @param {Object} metadata - All metadata associated with this token.\n */\n\n/**\n * Applies the given function to the wrapped string token.\n *\n * @example\n * token.update(function (str, metadata) {\n * return 
str.toUpperCase()\n * })\n *\n * @param {lunr.Token~updateFunction} fn - A function to apply to the token string.\n * @returns {lunr.Token}\n */\nlunr.Token.prototype.update = function (fn) {\n this.str = fn(this.str, this.metadata)\n return this\n}\n\n/**\n * Creates a clone of this token. Optionally a function can be\n * applied to the cloned token.\n *\n * @param {lunr.Token~updateFunction} [fn] - An optional function to apply to the cloned token.\n * @returns {lunr.Token}\n */\nlunr.Token.prototype.clone = function (fn) {\n fn = fn || function (s) { return s }\n return new lunr.Token (fn(this.str, this.metadata), this.metadata)\n}\n/*!\n * lunr.tokenizer\n * Copyright (C) 2020 Oliver Nightingale\n */\n\n/**\n * A function for splitting a string into tokens ready to be inserted into\n * the search index. Uses `lunr.tokenizer.separator` to split strings, change\n * the value of this property to change how strings are split into tokens.\n *\n * This tokenizer will convert its parameter to a string by calling `toString` and\n * then will split this string on the character in `lunr.tokenizer.separator`.\n * Arrays will have their elements converted to strings and wrapped in a lunr.Token.\n *\n * Optional metadata can be passed to the tokenizer, this metadata will be cloned and\n * added as metadata to every token that is created from the object to be tokenized.\n *\n * @static\n * @param {?(string|object|object[])} obj - The object to convert into tokens\n * @param {?object} metadata - Optional metadata to associate with every token\n * @returns {lunr.Token[]}\n * @see {@link lunr.Pipeline}\n */\nlunr.tokenizer = function (obj, metadata) {\n if (obj == null || obj == undefined) {\n return []\n }\n\n if (Array.isArray(obj)) {\n return obj.map(function (t) {\n return new lunr.Token(\n lunr.utils.asString(t).toLowerCase(),\n lunr.utils.clone(metadata)\n )\n })\n }\n\n var str = obj.toString().toLowerCase(),\n len = str.length,\n tokens = []\n\n for (var sliceEnd = 0, 
sliceStart = 0; sliceEnd <= len; sliceEnd++) {\n var char = str.charAt(sliceEnd),\n sliceLength = sliceEnd - sliceStart\n\n if ((char.match(lunr.tokenizer.separator) || sliceEnd == len)) {\n\n if (sliceLength > 0) {\n var tokenMetadata = lunr.utils.clone(metadata) || {}\n tokenMetadata[\"position\"] = [sliceStart, sliceLength]\n tokenMetadata[\"index\"] = tokens.length\n\n tokens.push(\n new lunr.Token (\n str.slice(sliceStart, sliceEnd),\n tokenMetadata\n )\n )\n }\n\n sliceStart = sliceEnd + 1\n }\n\n }\n\n return tokens\n}\n\n/**\n * The separator used to split a string into tokens. Override this property to change the behaviour of\n * `lunr.tokenizer` behaviour when tokenizing strings. By default this splits on whitespace and hyphens.\n *\n * @static\n * @see lunr.tokenizer\n */\nlunr.tokenizer.separator = /[\\s\\-]+/\n/*!\n * lunr.Pipeline\n * Copyright (C) 2020 Oliver Nightingale\n */\n\n/**\n * lunr.Pipelines maintain an ordered list of functions to be applied to all\n * tokens in documents entering the search index and queries being ran against\n * the index.\n *\n * An instance of lunr.Index created with the lunr shortcut will contain a\n * pipeline with a stop word filter and an English language stemmer. Extra\n * functions can be added before or after either of these functions or these\n * default functions can be removed.\n *\n * When run the pipeline will call each function in turn, passing a token, the\n * index of that token in the original list of all tokens and finally a list of\n * all the original tokens.\n *\n * The output of functions in the pipeline will be passed to the next function\n * in the pipeline. To exclude a token from entering the index the function\n * should return undefined, the rest of the pipeline will not be called with\n * this token.\n *\n * For serialisation of pipelines to work, all functions used in an instance of\n * a pipeline should be registered with lunr.Pipeline. Registered functions can\n * then be loaded. 
If trying to load a serialised pipeline that uses functions\n * that are not registered an error will be thrown.\n *\n * If not planning on serialising the pipeline then registering pipeline functions\n * is not necessary.\n *\n * @constructor\n */\nlunr.Pipeline = function () {\n this._stack = []\n}\n\nlunr.Pipeline.registeredFunctions = Object.create(null)\n\n/**\n * A pipeline function maps lunr.Token to lunr.Token. A lunr.Token contains the token\n * string as well as all known metadata. A pipeline function can mutate the token string\n * or mutate (or add) metadata for a given token.\n *\n * A pipeline function can indicate that the passed token should be discarded by returning\n * null, undefined or an empty string. This token will not be passed to any downstream pipeline\n * functions and will not be added to the index.\n *\n * Multiple tokens can be returned by returning an array of tokens. Each token will be passed\n * to any downstream pipeline functions and all will returned tokens will be added to the index.\n *\n * Any number of pipeline functions may be chained together using a lunr.Pipeline.\n *\n * @interface lunr.PipelineFunction\n * @param {lunr.Token} token - A token from the document being processed.\n * @param {number} i - The index of this token in the complete list of tokens for this document/field.\n * @param {lunr.Token[]} tokens - All tokens for this document/field.\n * @returns {(?lunr.Token|lunr.Token[])}\n */\n\n/**\n * Register a function with the pipeline.\n *\n * Functions that are used in the pipeline should be registered if the pipeline\n * needs to be serialised, or a serialised pipeline needs to be loaded.\n *\n * Registering a function does not add it to a pipeline, functions must still be\n * added to instances of the pipeline for them to be used when running a pipeline.\n *\n * @param {lunr.PipelineFunction} fn - The function to check for.\n * @param {String} label - The label to register this function with\n 
*/\nlunr.Pipeline.registerFunction = function (fn, label) {\n if (label in this.registeredFunctions) {\n lunr.utils.warn('Overwriting existing registered function: ' + label)\n }\n\n fn.label = label\n lunr.Pipeline.registeredFunctions[fn.label] = fn\n}\n\n/**\n * Warns if the function is not registered as a Pipeline function.\n *\n * @param {lunr.PipelineFunction} fn - The function to check for.\n * @private\n */\nlunr.Pipeline.warnIfFunctionNotRegistered = function (fn) {\n var isRegistered = fn.label && (fn.label in this.registeredFunctions)\n\n if (!isRegistered) {\n lunr.utils.warn('Function is not registered with pipeline. This may cause problems when serialising the index.\\n', fn)\n }\n}\n\n/**\n * Loads a previously serialised pipeline.\n *\n * All functions to be loaded must already be registered with lunr.Pipeline.\n * If any function from the serialised data has not been registered then an\n * error will be thrown.\n *\n * @param {Object} serialised - The serialised pipeline to load.\n * @returns {lunr.Pipeline}\n */\nlunr.Pipeline.load = function (serialised) {\n var pipeline = new lunr.Pipeline\n\n serialised.forEach(function (fnName) {\n var fn = lunr.Pipeline.registeredFunctions[fnName]\n\n if (fn) {\n pipeline.add(fn)\n } else {\n throw new Error('Cannot load unregistered function: ' + fnName)\n }\n })\n\n return pipeline\n}\n\n/**\n * Adds new functions to the end of the pipeline.\n *\n * Logs a warning if the function has not been registered.\n *\n * @param {lunr.PipelineFunction[]} functions - Any number of functions to add to the pipeline.\n */\nlunr.Pipeline.prototype.add = function () {\n var fns = Array.prototype.slice.call(arguments)\n\n fns.forEach(function (fn) {\n lunr.Pipeline.warnIfFunctionNotRegistered(fn)\n this._stack.push(fn)\n }, this)\n}\n\n/**\n * Adds a single function after a function that already exists in the\n * pipeline.\n *\n * Logs a warning if the function has not been registered.\n *\n * @param {lunr.PipelineFunction} 
existingFn - A function that already exists in the pipeline.\n * @param {lunr.PipelineFunction} newFn - The new function to add to the pipeline.\n */\nlunr.Pipeline.prototype.after = function (existingFn, newFn) {\n lunr.Pipeline.warnIfFunctionNotRegistered(newFn)\n\n var pos = this._stack.indexOf(existingFn)\n if (pos == -1) {\n throw new Error('Cannot find existingFn')\n }\n\n pos = pos + 1\n this._stack.splice(pos, 0, newFn)\n}\n\n/**\n * Adds a single function before a function that already exists in the\n * pipeline.\n *\n * Logs a warning if the function has not been registered.\n *\n * @param {lunr.PipelineFunction} existingFn - A function that already exists in the pipeline.\n * @param {lunr.PipelineFunction} newFn - The new function to add to the pipeline.\n */\nlunr.Pipeline.prototype.before = function (existingFn, newFn) {\n lunr.Pipeline.warnIfFunctionNotRegistered(newFn)\n\n var pos = this._stack.indexOf(existingFn)\n if (pos == -1) {\n throw new Error('Cannot find existingFn')\n }\n\n this._stack.splice(pos, 0, newFn)\n}\n\n/**\n * Removes a function from the pipeline.\n *\n * @param {lunr.PipelineFunction} fn The function to remove from the pipeline.\n */\nlunr.Pipeline.prototype.remove = function (fn) {\n var pos = this._stack.indexOf(fn)\n if (pos == -1) {\n return\n }\n\n this._stack.splice(pos, 1)\n}\n\n/**\n * Runs the current list of functions that make up the pipeline against the\n * passed tokens.\n *\n * @param {Array} tokens The tokens to run through the pipeline.\n * @returns {Array}\n */\nlunr.Pipeline.prototype.run = function (tokens) {\n var stackLength = this._stack.length\n\n for (var i = 0; i < stackLength; i++) {\n var fn = this._stack[i]\n var memo = []\n\n for (var j = 0; j < tokens.length; j++) {\n var result = fn(tokens[j], j, tokens)\n\n if (result === null || result === void 0 || result === '') continue\n\n if (Array.isArray(result)) {\n for (var k = 0; k < result.length; k++) {\n memo.push(result[k])\n }\n } else {\n 
memo.push(result)\n }\n }\n\n tokens = memo\n }\n\n return tokens\n}\n\n/**\n * Convenience method for passing a string through a pipeline and getting\n * strings out. This method takes care of wrapping the passed string in a\n * token and mapping the resulting tokens back to strings.\n *\n * @param {string} str - The string to pass through the pipeline.\n * @param {?object} metadata - Optional metadata to associate with the token\n * passed to the pipeline.\n * @returns {string[]}\n */\nlunr.Pipeline.prototype.runString = function (str, metadata) {\n var token = new lunr.Token (str, metadata)\n\n return this.run([token]).map(function (t) {\n return t.toString()\n })\n}\n\n/**\n * Resets the pipeline by removing any existing processors.\n *\n */\nlunr.Pipeline.prototype.reset = function () {\n this._stack = []\n}\n\n/**\n * Returns a representation of the pipeline ready for serialisation.\n *\n * Logs a warning if the function has not been registered.\n *\n * @returns {Array}\n */\nlunr.Pipeline.prototype.toJSON = function () {\n return this._stack.map(function (fn) {\n lunr.Pipeline.warnIfFunctionNotRegistered(fn)\n\n return fn.label\n })\n}\n/*!\n * lunr.Vector\n * Copyright (C) 2020 Oliver Nightingale\n */\n\n/**\n * A vector is used to construct the vector space of documents and queries. These\n * vectors support operations to determine the similarity between two documents or\n * a document and a query.\n *\n * Normally no parameters are required for initializing a vector, but in the case of\n * loading a previously dumped vector the raw elements can be provided to the constructor.\n *\n * For performance reasons vectors are implemented with a flat array, where an elements\n * index is immediately followed by its value. E.g. [index, value, index, value]. 
This\n * allows the underlying array to be as sparse as possible and still offer decent\n * performance when being used for vector calculations.\n *\n * @constructor\n * @param {Number[]} [elements] - The flat list of element index and element value pairs.\n */\nlunr.Vector = function (elements) {\n this._magnitude = 0\n this.elements = elements || []\n}\n\n\n/**\n * Calculates the position within the vector to insert a given index.\n *\n * This is used internally by insert and upsert. If there are duplicate indexes then\n * the position is returned as if the value for that index were to be updated, but it\n * is the callers responsibility to check whether there is a duplicate at that index\n *\n * @param {Number} insertIdx - The index at which the element should be inserted.\n * @returns {Number}\n */\nlunr.Vector.prototype.positionForIndex = function (index) {\n // For an empty vector the tuple can be inserted at the beginning\n if (this.elements.length == 0) {\n return 0\n }\n\n var start = 0,\n end = this.elements.length / 2,\n sliceLength = end - start,\n pivotPoint = Math.floor(sliceLength / 2),\n pivotIndex = this.elements[pivotPoint * 2]\n\n while (sliceLength > 1) {\n if (pivotIndex < index) {\n start = pivotPoint\n }\n\n if (pivotIndex > index) {\n end = pivotPoint\n }\n\n if (pivotIndex == index) {\n break\n }\n\n sliceLength = end - start\n pivotPoint = start + Math.floor(sliceLength / 2)\n pivotIndex = this.elements[pivotPoint * 2]\n }\n\n if (pivotIndex == index) {\n return pivotPoint * 2\n }\n\n if (pivotIndex > index) {\n return pivotPoint * 2\n }\n\n if (pivotIndex < index) {\n return (pivotPoint + 1) * 2\n }\n}\n\n/**\n * Inserts an element at an index within the vector.\n *\n * Does not allow duplicates, will throw an error if there is already an entry\n * for this index.\n *\n * @param {Number} insertIdx - The index at which the element should be inserted.\n * @param {Number} val - The value to be inserted into the vector.\n 
*/\nlunr.Vector.prototype.insert = function (insertIdx, val) {\n this.upsert(insertIdx, val, function () {\n throw \"duplicate index\"\n })\n}\n\n/**\n * Inserts or updates an existing index within the vector.\n *\n * @param {Number} insertIdx - The index at which the element should be inserted.\n * @param {Number} val - The value to be inserted into the vector.\n * @param {function} fn - A function that is called for updates, the existing value and the\n * requested value are passed as arguments\n */\nlunr.Vector.prototype.upsert = function (insertIdx, val, fn) {\n this._magnitude = 0\n var position = this.positionForIndex(insertIdx)\n\n if (this.elements[position] == insertIdx) {\n this.elements[position + 1] = fn(this.elements[position + 1], val)\n } else {\n this.elements.splice(position, 0, insertIdx, val)\n }\n}\n\n/**\n * Calculates the magnitude of this vector.\n *\n * @returns {Number}\n */\nlunr.Vector.prototype.magnitude = function () {\n if (this._magnitude) return this._magnitude\n\n var sumOfSquares = 0,\n elementsLength = this.elements.length\n\n for (var i = 1; i < elementsLength; i += 2) {\n var val = this.elements[i]\n sumOfSquares += val * val\n }\n\n return this._magnitude = Math.sqrt(sumOfSquares)\n}\n\n/**\n * Calculates the dot product of this vector and another vector.\n *\n * @param {lunr.Vector} otherVector - The vector to compute the dot product with.\n * @returns {Number}\n */\nlunr.Vector.prototype.dot = function (otherVector) {\n var dotProduct = 0,\n a = this.elements, b = otherVector.elements,\n aLen = a.length, bLen = b.length,\n aVal = 0, bVal = 0,\n i = 0, j = 0\n\n while (i < aLen && j < bLen) {\n aVal = a[i], bVal = b[j]\n if (aVal < bVal) {\n i += 2\n } else if (aVal > bVal) {\n j += 2\n } else if (aVal == bVal) {\n dotProduct += a[i + 1] * b[j + 1]\n i += 2\n j += 2\n }\n }\n\n return dotProduct\n}\n\n/**\n * Calculates the similarity between this vector and another vector.\n *\n * @param {lunr.Vector} otherVector - The other 
vector to calculate the\n * similarity with.\n * @returns {Number}\n */\nlunr.Vector.prototype.similarity = function (otherVector) {\n return this.dot(otherVector) / this.magnitude() || 0\n}\n\n/**\n * Converts the vector to an array of the elements within the vector.\n *\n * @returns {Number[]}\n */\nlunr.Vector.prototype.toArray = function () {\n var output = new Array (this.elements.length / 2)\n\n for (var i = 1, j = 0; i < this.elements.length; i += 2, j++) {\n output[j] = this.elements[i]\n }\n\n return output\n}\n\n/**\n * A JSON serializable representation of the vector.\n *\n * @returns {Number[]}\n */\nlunr.Vector.prototype.toJSON = function () {\n return this.elements\n}\n/* eslint-disable */\n/*!\n * lunr.stemmer\n * Copyright (C) 2020 Oliver Nightingale\n * Includes code from - http://tartarus.org/~martin/PorterStemmer/js.txt\n */\n\n/**\n * lunr.stemmer is an english language stemmer, this is a JavaScript\n * implementation of the PorterStemmer taken from http://tartarus.org/~martin\n *\n * @static\n * @implements {lunr.PipelineFunction}\n * @param {lunr.Token} token - The string to stem\n * @returns {lunr.Token}\n * @see {@link lunr.Pipeline}\n * @function\n */\nlunr.stemmer = (function(){\n var step2list = {\n \"ational\" : \"ate\",\n \"tional\" : \"tion\",\n \"enci\" : \"ence\",\n \"anci\" : \"ance\",\n \"izer\" : \"ize\",\n \"bli\" : \"ble\",\n \"alli\" : \"al\",\n \"entli\" : \"ent\",\n \"eli\" : \"e\",\n \"ousli\" : \"ous\",\n \"ization\" : \"ize\",\n \"ation\" : \"ate\",\n \"ator\" : \"ate\",\n \"alism\" : \"al\",\n \"iveness\" : \"ive\",\n \"fulness\" : \"ful\",\n \"ousness\" : \"ous\",\n \"aliti\" : \"al\",\n \"iviti\" : \"ive\",\n \"biliti\" : \"ble\",\n \"logi\" : \"log\"\n },\n\n step3list = {\n \"icate\" : \"ic\",\n \"ative\" : \"\",\n \"alize\" : \"al\",\n \"iciti\" : \"ic\",\n \"ical\" : \"ic\",\n \"ful\" : \"\",\n \"ness\" : \"\"\n },\n\n c = \"[^aeiou]\", // consonant\n v = \"[aeiouy]\", // vowel\n C = c + \"[^aeiouy]*\", // consonant 
sequence\n V = v + \"[aeiou]*\", // vowel sequence\n\n mgr0 = \"^(\" + C + \")?\" + V + C, // [C]VC... is m>0\n meq1 = \"^(\" + C + \")?\" + V + C + \"(\" + V + \")?$\", // [C]VC[V] is m=1\n mgr1 = \"^(\" + C + \")?\" + V + C + V + C, // [C]VCVC... is m>1\n s_v = \"^(\" + C + \")?\" + v; // vowel in stem\n\n var re_mgr0 = new RegExp(mgr0);\n var re_mgr1 = new RegExp(mgr1);\n var re_meq1 = new RegExp(meq1);\n var re_s_v = new RegExp(s_v);\n\n var re_1a = /^(.+?)(ss|i)es$/;\n var re2_1a = /^(.+?)([^s])s$/;\n var re_1b = /^(.+?)eed$/;\n var re2_1b = /^(.+?)(ed|ing)$/;\n var re_1b_2 = /.$/;\n var re2_1b_2 = /(at|bl|iz)$/;\n var re3_1b_2 = new RegExp(\"([^aeiouylsz])\\\\1$\");\n var re4_1b_2 = new RegExp(\"^\" + C + v + \"[^aeiouwxy]$\");\n\n var re_1c = /^(.+?[^aeiou])y$/;\n var re_2 = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;\n\n var re_3 = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;\n\n var re_4 = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;\n var re2_4 = /^(.+?)(s|t)(ion)$/;\n\n var re_5 = /^(.+?)e$/;\n var re_5_1 = /ll$/;\n var re3_5 = new RegExp(\"^\" + C + v + \"[^aeiouwxy]$\");\n\n var porterStemmer = function porterStemmer(w) {\n var stem,\n suffix,\n firstch,\n re,\n re2,\n re3,\n re4;\n\n if (w.length < 3) { return w; }\n\n firstch = w.substr(0,1);\n if (firstch == \"y\") {\n w = firstch.toUpperCase() + w.substr(1);\n }\n\n // Step 1a\n re = re_1a\n re2 = re2_1a;\n\n if (re.test(w)) { w = w.replace(re,\"$1$2\"); }\n else if (re2.test(w)) { w = w.replace(re2,\"$1$2\"); }\n\n // Step 1b\n re = re_1b;\n re2 = re2_1b;\n if (re.test(w)) {\n var fp = re.exec(w);\n re = re_mgr0;\n if (re.test(fp[1])) {\n re = re_1b_2;\n w = w.replace(re,\"\");\n }\n } else if (re2.test(w)) {\n var fp = re2.exec(w);\n stem = fp[1];\n re2 = re_s_v;\n if (re2.test(stem)) {\n w = stem;\n re2 = re2_1b_2;\n re3 = re3_1b_2;\n re4 = re4_1b_2;\n if 
(re2.test(w)) { w = w + \"e\"; }\n else if (re3.test(w)) { re = re_1b_2; w = w.replace(re,\"\"); }\n else if (re4.test(w)) { w = w + \"e\"; }\n }\n }\n\n // Step 1c - replace suffix y or Y by i if preceded by a non-vowel which is not the first letter of the word (so cry -> cri, by -> by, say -> say)\n re = re_1c;\n if (re.test(w)) {\n var fp = re.exec(w);\n stem = fp[1];\n w = stem + \"i\";\n }\n\n // Step 2\n re = re_2;\n if (re.test(w)) {\n var fp = re.exec(w);\n stem = fp[1];\n suffix = fp[2];\n re = re_mgr0;\n if (re.test(stem)) {\n w = stem + step2list[suffix];\n }\n }\n\n // Step 3\n re = re_3;\n if (re.test(w)) {\n var fp = re.exec(w);\n stem = fp[1];\n suffix = fp[2];\n re = re_mgr0;\n if (re.test(stem)) {\n w = stem + step3list[suffix];\n }\n }\n\n // Step 4\n re = re_4;\n re2 = re2_4;\n if (re.test(w)) {\n var fp = re.exec(w);\n stem = fp[1];\n re = re_mgr1;\n if (re.test(stem)) {\n w = stem;\n }\n } else if (re2.test(w)) {\n var fp = re2.exec(w);\n stem = fp[1] + fp[2];\n re2 = re_mgr1;\n if (re2.test(stem)) {\n w = stem;\n }\n }\n\n // Step 5\n re = re_5;\n if (re.test(w)) {\n var fp = re.exec(w);\n stem = fp[1];\n re = re_mgr1;\n re2 = re_meq1;\n re3 = re3_5;\n if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) {\n w = stem;\n }\n }\n\n re = re_5_1;\n re2 = re_mgr1;\n if (re.test(w) && re2.test(w)) {\n re = re_1b_2;\n w = w.replace(re,\"\");\n }\n\n // and turn initial Y back to y\n\n if (firstch == \"y\") {\n w = firstch.toLowerCase() + w.substr(1);\n }\n\n return w;\n };\n\n return function (token) {\n return token.update(porterStemmer);\n }\n})();\n\nlunr.Pipeline.registerFunction(lunr.stemmer, 'stemmer')\n/*!\n * lunr.stopWordFilter\n * Copyright (C) 2020 Oliver Nightingale\n */\n\n/**\n * lunr.generateStopWordFilter builds a stopWordFilter function from the provided\n * list of stop words.\n *\n * The built in lunr.stopWordFilter is built using this generator and can be used\n * to generate custom stopWordFilters for applications or non 
English languages.\n *\n * @function\n * @param {Array} token The token to pass through the filter\n * @returns {lunr.PipelineFunction}\n * @see lunr.Pipeline\n * @see lunr.stopWordFilter\n */\nlunr.generateStopWordFilter = function (stopWords) {\n var words = stopWords.reduce(function (memo, stopWord) {\n memo[stopWord] = stopWord\n return memo\n }, {})\n\n return function (token) {\n if (token && words[token.toString()] !== token.toString()) return token\n }\n}\n\n/**\n * lunr.stopWordFilter is an English language stop word list filter, any words\n * contained in the list will not be passed through the filter.\n *\n * This is intended to be used in the Pipeline. If the token does not pass the\n * filter then undefined will be returned.\n *\n * @function\n * @implements {lunr.PipelineFunction}\n * @params {lunr.Token} token - A token to check for being a stop word.\n * @returns {lunr.Token}\n * @see {@link lunr.Pipeline}\n */\nlunr.stopWordFilter = lunr.generateStopWordFilter([\n 'a',\n 'able',\n 'about',\n 'across',\n 'after',\n 'all',\n 'almost',\n 'also',\n 'am',\n 'among',\n 'an',\n 'and',\n 'any',\n 'are',\n 'as',\n 'at',\n 'be',\n 'because',\n 'been',\n 'but',\n 'by',\n 'can',\n 'cannot',\n 'could',\n 'dear',\n 'did',\n 'do',\n 'does',\n 'either',\n 'else',\n 'ever',\n 'every',\n 'for',\n 'from',\n 'get',\n 'got',\n 'had',\n 'has',\n 'have',\n 'he',\n 'her',\n 'hers',\n 'him',\n 'his',\n 'how',\n 'however',\n 'i',\n 'if',\n 'in',\n 'into',\n 'is',\n 'it',\n 'its',\n 'just',\n 'least',\n 'let',\n 'like',\n 'likely',\n 'may',\n 'me',\n 'might',\n 'most',\n 'must',\n 'my',\n 'neither',\n 'no',\n 'nor',\n 'not',\n 'of',\n 'off',\n 'often',\n 'on',\n 'only',\n 'or',\n 'other',\n 'our',\n 'own',\n 'rather',\n 'said',\n 'say',\n 'says',\n 'she',\n 'should',\n 'since',\n 'so',\n 'some',\n 'than',\n 'that',\n 'the',\n 'their',\n 'them',\n 'then',\n 'there',\n 'these',\n 'they',\n 'this',\n 'tis',\n 'to',\n 'too',\n 'twas',\n 'us',\n 'wants',\n 'was',\n 'we',\n 
'were',\n 'what',\n 'when',\n 'where',\n 'which',\n 'while',\n 'who',\n 'whom',\n 'why',\n 'will',\n 'with',\n 'would',\n 'yet',\n 'you',\n 'your'\n])\n\nlunr.Pipeline.registerFunction(lunr.stopWordFilter, 'stopWordFilter')\n/*!\n * lunr.trimmer\n * Copyright (C) 2020 Oliver Nightingale\n */\n\n/**\n * lunr.trimmer is a pipeline function for trimming non word\n * characters from the beginning and end of tokens before they\n * enter the index.\n *\n * This implementation may not work correctly for non latin\n * characters and should either be removed or adapted for use\n * with languages with non-latin characters.\n *\n * @static\n * @implements {lunr.PipelineFunction}\n * @param {lunr.Token} token The token to pass through the filter\n * @returns {lunr.Token}\n * @see lunr.Pipeline\n */\nlunr.trimmer = function (token) {\n return token.update(function (s) {\n return s.replace(/^\\W+/, '').replace(/\\W+$/, '')\n })\n}\n\nlunr.Pipeline.registerFunction(lunr.trimmer, 'trimmer')\n/*!\n * lunr.TokenSet\n * Copyright (C) 2020 Oliver Nightingale\n */\n\n/**\n * A token set is used to store the unique list of all tokens\n * within an index. 
Token sets are also used to represent an\n * incoming query to the index, this query token set and index\n * token set are then intersected to find which tokens to look\n * up in the inverted index.\n *\n * A token set can hold multiple tokens, as in the case of the\n * index token set, or it can hold a single token as in the\n * case of a simple query token set.\n *\n * Additionally token sets are used to perform wildcard matching.\n * Leading, contained and trailing wildcards are supported, and\n * from this edit distance matching can also be provided.\n *\n * Token sets are implemented as a minimal finite state automata,\n * where both common prefixes and suffixes are shared between tokens.\n * This helps to reduce the space used for storing the token set.\n *\n * @constructor\n */\nlunr.TokenSet = function () {\n this.final = false\n this.edges = {}\n this.id = lunr.TokenSet._nextId\n lunr.TokenSet._nextId += 1\n}\n\n/**\n * Keeps track of the next, auto increment, identifier to assign\n * to a new tokenSet.\n *\n * TokenSets require a unique identifier to be correctly minimised.\n *\n * @private\n */\nlunr.TokenSet._nextId = 1\n\n/**\n * Creates a TokenSet instance from the given sorted array of words.\n *\n * @param {String[]} arr - A sorted array of strings to create the set from.\n * @returns {lunr.TokenSet}\n * @throws Will throw an error if the input array is not sorted.\n */\nlunr.TokenSet.fromArray = function (arr) {\n var builder = new lunr.TokenSet.Builder\n\n for (var i = 0, len = arr.length; i < len; i++) {\n builder.insert(arr[i])\n }\n\n builder.finish()\n return builder.root\n}\n\n/**\n * Creates a token set from a query clause.\n *\n * @private\n * @param {Object} clause - A single clause from lunr.Query.\n * @param {string} clause.term - The query clause term.\n * @param {number} [clause.editDistance] - The optional edit distance for the term.\n * @returns {lunr.TokenSet}\n */\nlunr.TokenSet.fromClause = function (clause) {\n if ('editDistance' 
in clause) {\n return lunr.TokenSet.fromFuzzyString(clause.term, clause.editDistance)\n } else {\n return lunr.TokenSet.fromString(clause.term)\n }\n}\n\n/**\n * Creates a token set representing a single string with a specified\n * edit distance.\n *\n * Insertions, deletions, substitutions and transpositions are each\n * treated as an edit distance of 1.\n *\n * Increasing the allowed edit distance will have a dramatic impact\n * on the performance of both creating and intersecting these TokenSets.\n * It is advised to keep the edit distance less than 3.\n *\n * @param {string} str - The string to create the token set from.\n * @param {number} editDistance - The allowed edit distance to match.\n * @returns {lunr.Vector}\n */\nlunr.TokenSet.fromFuzzyString = function (str, editDistance) {\n var root = new lunr.TokenSet\n\n var stack = [{\n node: root,\n editsRemaining: editDistance,\n str: str\n }]\n\n while (stack.length) {\n var frame = stack.pop()\n\n // no edit\n if (frame.str.length > 0) {\n var char = frame.str.charAt(0),\n noEditNode\n\n if (char in frame.node.edges) {\n noEditNode = frame.node.edges[char]\n } else {\n noEditNode = new lunr.TokenSet\n frame.node.edges[char] = noEditNode\n }\n\n if (frame.str.length == 1) {\n noEditNode.final = true\n }\n\n stack.push({\n node: noEditNode,\n editsRemaining: frame.editsRemaining,\n str: frame.str.slice(1)\n })\n }\n\n if (frame.editsRemaining == 0) {\n continue\n }\n\n // insertion\n if (\"*\" in frame.node.edges) {\n var insertionNode = frame.node.edges[\"*\"]\n } else {\n var insertionNode = new lunr.TokenSet\n frame.node.edges[\"*\"] = insertionNode\n }\n\n if (frame.str.length == 0) {\n insertionNode.final = true\n }\n\n stack.push({\n node: insertionNode,\n editsRemaining: frame.editsRemaining - 1,\n str: frame.str\n })\n\n // deletion\n // can only do a deletion if we have enough edits remaining\n // and if there are characters left to delete in the string\n if (frame.str.length > 1) {\n stack.push({\n 
node: frame.node,\n editsRemaining: frame.editsRemaining - 1,\n str: frame.str.slice(1)\n })\n }\n\n // deletion\n // just removing the last character from the str\n if (frame.str.length == 1) {\n frame.node.final = true\n }\n\n // substitution\n // can only do a substitution if we have enough edits remaining\n // and if there are characters left to substitute\n if (frame.str.length >= 1) {\n if (\"*\" in frame.node.edges) {\n var substitutionNode = frame.node.edges[\"*\"]\n } else {\n var substitutionNode = new lunr.TokenSet\n frame.node.edges[\"*\"] = substitutionNode\n }\n\n if (frame.str.length == 1) {\n substitutionNode.final = true\n }\n\n stack.push({\n node: substitutionNode,\n editsRemaining: frame.editsRemaining - 1,\n str: frame.str.slice(1)\n })\n }\n\n // transposition\n // can only do a transposition if there are edits remaining\n // and there are enough characters to transpose\n if (frame.str.length > 1) {\n var charA = frame.str.charAt(0),\n charB = frame.str.charAt(1),\n transposeNode\n\n if (charB in frame.node.edges) {\n transposeNode = frame.node.edges[charB]\n } else {\n transposeNode = new lunr.TokenSet\n frame.node.edges[charB] = transposeNode\n }\n\n if (frame.str.length == 1) {\n transposeNode.final = true\n }\n\n stack.push({\n node: transposeNode,\n editsRemaining: frame.editsRemaining - 1,\n str: charA + frame.str.slice(2)\n })\n }\n }\n\n return root\n}\n\n/**\n * Creates a TokenSet from a string.\n *\n * The string may contain one or more wildcard characters (*)\n * that will allow wildcard matching when intersecting with\n * another TokenSet.\n *\n * @param {string} str - The string to create a TokenSet from.\n * @returns {lunr.TokenSet}\n */\nlunr.TokenSet.fromString = function (str) {\n var node = new lunr.TokenSet,\n root = node\n\n /*\n * Iterates through all characters within the passed string\n * appending a node for each character.\n *\n * When a wildcard character is found then a self\n * referencing edge is introduced to 
continually match\n * any number of any characters.\n */\n for (var i = 0, len = str.length; i < len; i++) {\n var char = str[i],\n final = (i == len - 1)\n\n if (char == \"*\") {\n node.edges[char] = node\n node.final = final\n\n } else {\n var next = new lunr.TokenSet\n next.final = final\n\n node.edges[char] = next\n node = next\n }\n }\n\n return root\n}\n\n/**\n * Converts this TokenSet into an array of strings\n * contained within the TokenSet.\n *\n * This is not intended to be used on a TokenSet that\n * contains wildcards, in these cases the results are\n * undefined and are likely to cause an infinite loop.\n *\n * @returns {string[]}\n */\nlunr.TokenSet.prototype.toArray = function () {\n var words = []\n\n var stack = [{\n prefix: \"\",\n node: this\n }]\n\n while (stack.length) {\n var frame = stack.pop(),\n edges = Object.keys(frame.node.edges),\n len = edges.length\n\n if (frame.node.final) {\n /* In Safari, at this point the prefix is sometimes corrupted, see:\n * https://github.com/olivernn/lunr.js/issues/279 Calling any\n * String.prototype method forces Safari to \"cast\" this string to what\n * it's supposed to be, fixing the bug. */\n frame.prefix.charAt(0)\n words.push(frame.prefix)\n }\n\n for (var i = 0; i < len; i++) {\n var edge = edges[i]\n\n stack.push({\n prefix: frame.prefix.concat(edge),\n node: frame.node.edges[edge]\n })\n }\n }\n\n return words\n}\n\n/**\n * Generates a string representation of a TokenSet.\n *\n * This is intended to allow TokenSets to be used as keys\n * in objects, largely to aid the construction and minimisation\n * of a TokenSet. 
As such it is not designed to be a human\n * friendly representation of the TokenSet.\n *\n * @returns {string}\n */\nlunr.TokenSet.prototype.toString = function () {\n // NOTE: Using Object.keys here as this.edges is very likely\n // to enter 'hash-mode' with many keys being added\n //\n // avoiding a for-in loop here as it leads to the function\n // being de-optimised (at least in V8). From some simple\n // benchmarks the performance is comparable, but allowing\n // V8 to optimize may mean easy performance wins in the future.\n\n if (this._str) {\n return this._str\n }\n\n var str = this.final ? '1' : '0',\n labels = Object.keys(this.edges).sort(),\n len = labels.length\n\n for (var i = 0; i < len; i++) {\n var label = labels[i],\n node = this.edges[label]\n\n str = str + label + node.id\n }\n\n return str\n}\n\n/**\n * Returns a new TokenSet that is the intersection of\n * this TokenSet and the passed TokenSet.\n *\n * This intersection will take into account any wildcards\n * contained within the TokenSet.\n *\n * @param {lunr.TokenSet} b - An other TokenSet to intersect with.\n * @returns {lunr.TokenSet}\n */\nlunr.TokenSet.prototype.intersect = function (b) {\n var output = new lunr.TokenSet,\n frame = undefined\n\n var stack = [{\n qNode: b,\n output: output,\n node: this\n }]\n\n while (stack.length) {\n frame = stack.pop()\n\n // NOTE: As with the #toString method, we are using\n // Object.keys and a for loop instead of a for-in loop\n // as both of these objects enter 'hash' mode, causing\n // the function to be de-optimised in V8\n var qEdges = Object.keys(frame.qNode.edges),\n qLen = qEdges.length,\n nEdges = Object.keys(frame.node.edges),\n nLen = nEdges.length\n\n for (var q = 0; q < qLen; q++) {\n var qEdge = qEdges[q]\n\n for (var n = 0; n < nLen; n++) {\n var nEdge = nEdges[n]\n\n if (nEdge == qEdge || qEdge == '*') {\n var node = frame.node.edges[nEdge],\n qNode = frame.qNode.edges[qEdge],\n final = node.final && qNode.final,\n next = 
undefined\n\n if (nEdge in frame.output.edges) {\n // an edge already exists for this character\n // no need to create a new node, just set the finality\n // bit unless this node is already final\n next = frame.output.edges[nEdge]\n next.final = next.final || final\n\n } else {\n // no edge exists yet, must create one\n // set the finality bit and insert it\n // into the output\n next = new lunr.TokenSet\n next.final = final\n frame.output.edges[nEdge] = next\n }\n\n stack.push({\n qNode: qNode,\n output: next,\n node: node\n })\n }\n }\n }\n }\n\n return output\n}\nlunr.TokenSet.Builder = function () {\n this.previousWord = \"\"\n this.root = new lunr.TokenSet\n this.uncheckedNodes = []\n this.minimizedNodes = {}\n}\n\nlunr.TokenSet.Builder.prototype.insert = function (word) {\n var node,\n commonPrefix = 0\n\n if (word < this.previousWord) {\n throw new Error (\"Out of order word insertion\")\n }\n\n for (var i = 0; i < word.length && i < this.previousWord.length; i++) {\n if (word[i] != this.previousWord[i]) break\n commonPrefix++\n }\n\n this.minimize(commonPrefix)\n\n if (this.uncheckedNodes.length == 0) {\n node = this.root\n } else {\n node = this.uncheckedNodes[this.uncheckedNodes.length - 1].child\n }\n\n for (var i = commonPrefix; i < word.length; i++) {\n var nextNode = new lunr.TokenSet,\n char = word[i]\n\n node.edges[char] = nextNode\n\n this.uncheckedNodes.push({\n parent: node,\n char: char,\n child: nextNode\n })\n\n node = nextNode\n }\n\n node.final = true\n this.previousWord = word\n}\n\nlunr.TokenSet.Builder.prototype.finish = function () {\n this.minimize(0)\n}\n\nlunr.TokenSet.Builder.prototype.minimize = function (downTo) {\n for (var i = this.uncheckedNodes.length - 1; i >= downTo; i--) {\n var node = this.uncheckedNodes[i],\n childKey = node.child.toString()\n\n if (childKey in this.minimizedNodes) {\n node.parent.edges[node.char] = this.minimizedNodes[childKey]\n } else {\n // Cache the key for this node since\n // we know it can't change 
anymore\n node.child._str = childKey\n\n this.minimizedNodes[childKey] = node.child\n }\n\n this.uncheckedNodes.pop()\n }\n}\n/*!\n * lunr.Index\n * Copyright (C) 2020 Oliver Nightingale\n */\n\n/**\n * An index contains the built index of all documents and provides a query interface\n * to the index.\n *\n * Usually instances of lunr.Index will not be created using this constructor, instead\n * lunr.Builder should be used to construct new indexes, or lunr.Index.load should be\n * used to load previously built and serialized indexes.\n *\n * @constructor\n * @param {Object} attrs - The attributes of the built search index.\n * @param {Object} attrs.invertedIndex - An index of term/field to document reference.\n * @param {Object} attrs.fieldVectors - Field vectors\n * @param {lunr.TokenSet} attrs.tokenSet - An set of all corpus tokens.\n * @param {string[]} attrs.fields - The names of indexed document fields.\n * @param {lunr.Pipeline} attrs.pipeline - The pipeline to use for search terms.\n */\nlunr.Index = function (attrs) {\n this.invertedIndex = attrs.invertedIndex\n this.fieldVectors = attrs.fieldVectors\n this.tokenSet = attrs.tokenSet\n this.fields = attrs.fields\n this.pipeline = attrs.pipeline\n}\n\n/**\n * A result contains details of a document matching a search query.\n * @typedef {Object} lunr.Index~Result\n * @property {string} ref - The reference of the document this result represents.\n * @property {number} score - A number between 0 and 1 representing how similar this document is to the query.\n * @property {lunr.MatchData} matchData - Contains metadata about this match including which term(s) caused the match.\n */\n\n/**\n * Although lunr provides the ability to create queries using lunr.Query, it also provides a simple\n * query language which itself is parsed into an instance of lunr.Query.\n *\n * For programmatically building queries it is advised to directly use lunr.Query, the query language\n * is best used for human entered text rather 
than program generated text.\n *\n * At its simplest queries can just be a single term, e.g. `hello`, multiple terms are also supported\n * and will be combined with OR, e.g `hello world` will match documents that contain either 'hello'\n * or 'world', though those that contain both will rank higher in the results.\n *\n * Wildcards can be included in terms to match one or more unspecified characters, these wildcards can\n * be inserted anywhere within the term, and more than one wildcard can exist in a single term. Adding\n * wildcards will increase the number of documents that will be found but can also have a negative\n * impact on query performance, especially with wildcards at the beginning of a term.\n *\n * Terms can be restricted to specific fields, e.g. `title:hello`, only documents with the term\n * hello in the title field will match this query. Using a field not present in the index will lead\n * to an error being thrown.\n *\n * Modifiers can also be added to terms, lunr supports edit distance and boost modifiers on terms. A term\n * boost will make documents matching that term score higher, e.g. `foo^5`. Edit distance is also supported\n * to provide fuzzy matching, e.g. 'hello~2' will match documents with hello with an edit distance of 2.\n * Avoid large values for edit distance to improve query performance.\n *\n * Each term also supports a presence modifier. By default a term's presence in document is optional, however\n * this can be changed to either required or prohibited. For a term's presence to be required in a document the\n * term should be prefixed with a '+', e.g. `+foo bar` is a search for documents that must contain 'foo' and\n * optionally contain 'bar'. Conversely a leading '-' sets the terms presence to prohibited, i.e. it must not\n * appear in a document, e.g. 
`-foo bar` is a search for documents that do not contain 'foo' but may contain 'bar'.\n *\n * To escape special characters the backslash character '\\' can be used, this allows searches to include\n * characters that would normally be considered modifiers, e.g. `foo\\~2` will search for a term \"foo~2\" instead\n * of attempting to apply a boost of 2 to the search term \"foo\".\n *\n * @typedef {string} lunr.Index~QueryString\n * @example Simple single term query\n * hello\n * @example Multiple term query\n * hello world\n * @example term scoped to a field\n * title:hello\n * @example term with a boost of 10\n * hello^10\n * @example term with an edit distance of 2\n * hello~2\n * @example terms with presence modifiers\n * -foo +bar baz\n */\n\n/**\n * Performs a search against the index using lunr query syntax.\n *\n * Results will be returned sorted by their score, the most relevant results\n * will be returned first. For details on how the score is calculated, please see\n * the {@link https://lunrjs.com/guides/searching.html#scoring|guide}.\n *\n * For more programmatic querying use lunr.Index#query.\n *\n * @param {lunr.Index~QueryString} queryString - A string containing a lunr query.\n * @throws {lunr.QueryParseError} If the passed query string cannot be parsed.\n * @returns {lunr.Index~Result[]}\n */\nlunr.Index.prototype.search = function (queryString) {\n return this.query(function (query) {\n var parser = new lunr.QueryParser(queryString, query)\n parser.parse()\n })\n}\n\n/**\n * A query builder callback provides a query object to be used to express\n * the query to perform on the index.\n *\n * @callback lunr.Index~queryBuilder\n * @param {lunr.Query} query - The query object to build up.\n * @this lunr.Query\n */\n\n/**\n * Performs a query against the index using the yielded lunr.Query object.\n *\n * If performing programmatic queries against the index, this method is preferred\n * over lunr.Index#search so as to avoid the additional query parsing 
overhead.\n *\n * A query object is yielded to the supplied function which should be used to\n * express the query to be run against the index.\n *\n * Note that although this function takes a callback parameter it is _not_ an\n * asynchronous operation, the callback is just yielded a query object to be\n * customized.\n *\n * @param {lunr.Index~queryBuilder} fn - A function that is used to build the query.\n * @returns {lunr.Index~Result[]}\n */\nlunr.Index.prototype.query = function (fn) {\n // for each query clause\n // * process terms\n // * expand terms from token set\n // * find matching documents and metadata\n // * get document vectors\n // * score documents\n\n var query = new lunr.Query(this.fields),\n matchingFields = Object.create(null),\n queryVectors = Object.create(null),\n termFieldCache = Object.create(null),\n requiredMatches = Object.create(null),\n prohibitedMatches = Object.create(null)\n\n /*\n * To support field level boosts a query vector is created per\n * field. An empty vector is eagerly created to support negated\n * queries.\n */\n for (var i = 0; i < this.fields.length; i++) {\n queryVectors[this.fields[i]] = new lunr.Vector\n }\n\n fn.call(query, query)\n\n for (var i = 0; i < query.clauses.length; i++) {\n /*\n * Unless the pipeline has been disabled for this term, which is\n * the case for terms with wildcards, we need to pass the clause\n * term through the search pipeline. A pipeline returns an array\n * of processed terms. 
Pipeline functions may expand the passed\n * term, which means we may end up performing multiple index lookups\n * for a single query term.\n */\n var clause = query.clauses[i],\n terms = null,\n clauseMatches = lunr.Set.empty\n\n if (clause.usePipeline) {\n terms = this.pipeline.runString(clause.term, {\n fields: clause.fields\n })\n } else {\n terms = [clause.term]\n }\n\n for (var m = 0; m < terms.length; m++) {\n var term = terms[m]\n\n /*\n * Each term returned from the pipeline needs to use the same query\n * clause object, e.g. the same boost and or edit distance. The\n * simplest way to do this is to re-use the clause object but mutate\n * its term property.\n */\n clause.term = term\n\n /*\n * From the term in the clause we create a token set which will then\n * be used to intersect the indexes token set to get a list of terms\n * to lookup in the inverted index\n */\n var termTokenSet = lunr.TokenSet.fromClause(clause),\n expandedTerms = this.tokenSet.intersect(termTokenSet).toArray()\n\n /*\n * If a term marked as required does not exist in the tokenSet it is\n * impossible for the search to return any matches. 
We set all the field\n * scoped required matches set to empty and stop examining any further\n * clauses.\n */\n if (expandedTerms.length === 0 && clause.presence === lunr.Query.presence.REQUIRED) {\n for (var k = 0; k < clause.fields.length; k++) {\n var field = clause.fields[k]\n requiredMatches[field] = lunr.Set.empty\n }\n\n break\n }\n\n for (var j = 0; j < expandedTerms.length; j++) {\n /*\n * For each term get the posting and termIndex, this is required for\n * building the query vector.\n */\n var expandedTerm = expandedTerms[j],\n posting = this.invertedIndex[expandedTerm],\n termIndex = posting._index\n\n for (var k = 0; k < clause.fields.length; k++) {\n /*\n * For each field that this query term is scoped by (by default\n * all fields are in scope) we need to get all the document refs\n * that have this term in that field.\n *\n * The posting is the entry in the invertedIndex for the matching\n * term from above.\n */\n var field = clause.fields[k],\n fieldPosting = posting[field],\n matchingDocumentRefs = Object.keys(fieldPosting),\n termField = expandedTerm + \"/\" + field,\n matchingDocumentsSet = new lunr.Set(matchingDocumentRefs)\n\n /*\n * if the presence of this term is required ensure that the matching\n * documents are added to the set of required matches for this clause.\n *\n */\n if (clause.presence == lunr.Query.presence.REQUIRED) {\n clauseMatches = clauseMatches.union(matchingDocumentsSet)\n\n if (requiredMatches[field] === undefined) {\n requiredMatches[field] = lunr.Set.complete\n }\n }\n\n /*\n * if the presence of this term is prohibited ensure that the matching\n * documents are added to the set of prohibited matches for this field,\n * creating that set if it does not yet exist.\n */\n if (clause.presence == lunr.Query.presence.PROHIBITED) {\n if (prohibitedMatches[field] === undefined) {\n prohibitedMatches[field] = lunr.Set.empty\n }\n\n prohibitedMatches[field] = prohibitedMatches[field].union(matchingDocumentsSet)\n\n /*\n * 
Prohibited matches should not be part of the query vector used for\n * similarity scoring and no metadata should be extracted so we continue\n * to the next field\n */\n continue\n }\n\n /*\n * The query field vector is populated using the termIndex found for\n * the term and a unit value with the appropriate boost applied.\n * Using upsert because there could already be an entry in the vector\n * for the term we are working with. In that case we just add the scores\n * together.\n */\n queryVectors[field].upsert(termIndex, clause.boost, function (a, b) { return a + b })\n\n /**\n * If we've already seen this term, field combo then we've already collected\n * the matching documents and metadata, no need to go through all that again\n */\n if (termFieldCache[termField]) {\n continue\n }\n\n for (var l = 0; l < matchingDocumentRefs.length; l++) {\n /*\n * All metadata for this term/field/document triple\n * are then extracted and collected into an instance\n * of lunr.MatchData ready to be returned in the query\n * results\n */\n var matchingDocumentRef = matchingDocumentRefs[l],\n matchingFieldRef = new lunr.FieldRef (matchingDocumentRef, field),\n metadata = fieldPosting[matchingDocumentRef],\n fieldMatch\n\n if ((fieldMatch = matchingFields[matchingFieldRef]) === undefined) {\n matchingFields[matchingFieldRef] = new lunr.MatchData (expandedTerm, field, metadata)\n } else {\n fieldMatch.add(expandedTerm, field, metadata)\n }\n\n }\n\n termFieldCache[termField] = true\n }\n }\n }\n\n /**\n * If the presence was required we need to update the requiredMatches field sets.\n * We do this after all fields for the term have collected their matches because\n * the clause terms presence is required in _any_ of the fields not _all_ of the\n * fields.\n */\n if (clause.presence === lunr.Query.presence.REQUIRED) {\n for (var k = 0; k < clause.fields.length; k++) {\n var field = clause.fields[k]\n requiredMatches[field] = requiredMatches[field].intersect(clauseMatches)\n }\n 
}\n }\n\n /**\n * Need to combine the field scoped required and prohibited\n * matching documents into a global set of required and prohibited\n * matches\n */\n var allRequiredMatches = lunr.Set.complete,\n allProhibitedMatches = lunr.Set.empty\n\n for (var i = 0; i < this.fields.length; i++) {\n var field = this.fields[i]\n\n if (requiredMatches[field]) {\n allRequiredMatches = allRequiredMatches.intersect(requiredMatches[field])\n }\n\n if (prohibitedMatches[field]) {\n allProhibitedMatches = allProhibitedMatches.union(prohibitedMatches[field])\n }\n }\n\n var matchingFieldRefs = Object.keys(matchingFields),\n results = [],\n matches = Object.create(null)\n\n /*\n * If the query is negated (contains only prohibited terms)\n * we need to get _all_ fieldRefs currently existing in the\n * index. This is only done when we know that the query is\n * entirely prohibited terms to avoid any cost of getting all\n * fieldRefs unnecessarily.\n *\n * Additionally, blank MatchData must be created to correctly\n * populate the results.\n */\n if (query.isNegated()) {\n matchingFieldRefs = Object.keys(this.fieldVectors)\n\n for (var i = 0; i < matchingFieldRefs.length; i++) {\n var matchingFieldRef = matchingFieldRefs[i]\n var fieldRef = lunr.FieldRef.fromString(matchingFieldRef)\n matchingFields[matchingFieldRef] = new lunr.MatchData\n }\n }\n\n for (var i = 0; i < matchingFieldRefs.length; i++) {\n /*\n * Currently we have document fields that match the query, but we\n * need to return documents. 
The matchData and scores are combined\n * from multiple fields belonging to the same document.\n *\n * Scores are calculated by field, using the query vectors created\n * above, and combined into a final document score using addition.\n */\n var fieldRef = lunr.FieldRef.fromString(matchingFieldRefs[i]),\n docRef = fieldRef.docRef\n\n if (!allRequiredMatches.contains(docRef)) {\n continue\n }\n\n if (allProhibitedMatches.contains(docRef)) {\n continue\n }\n\n var fieldVector = this.fieldVectors[fieldRef],\n score = queryVectors[fieldRef.fieldName].similarity(fieldVector),\n docMatch\n\n if ((docMatch = matches[docRef]) !== undefined) {\n docMatch.score += score\n docMatch.matchData.combine(matchingFields[fieldRef])\n } else {\n var match = {\n ref: docRef,\n score: score,\n matchData: matchingFields[fieldRef]\n }\n matches[docRef] = match\n results.push(match)\n }\n }\n\n /*\n * Sort the results objects by score, highest first.\n */\n return results.sort(function (a, b) {\n return b.score - a.score\n })\n}\n\n/**\n * Prepares the index for JSON serialization.\n *\n * The schema for this JSON blob will be described in a\n * separate JSON schema file.\n *\n * @returns {Object}\n */\nlunr.Index.prototype.toJSON = function () {\n var invertedIndex = Object.keys(this.invertedIndex)\n .sort()\n .map(function (term) {\n return [term, this.invertedIndex[term]]\n }, this)\n\n var fieldVectors = Object.keys(this.fieldVectors)\n .map(function (ref) {\n return [ref, this.fieldVectors[ref].toJSON()]\n }, this)\n\n return {\n version: lunr.version,\n fields: this.fields,\n fieldVectors: fieldVectors,\n invertedIndex: invertedIndex,\n pipeline: this.pipeline.toJSON()\n }\n}\n\n/**\n * Loads a previously serialized lunr.Index\n *\n * @param {Object} serializedIndex - A previously serialized lunr.Index\n * @returns {lunr.Index}\n */\nlunr.Index.load = function (serializedIndex) {\n var attrs = {},\n fieldVectors = {},\n serializedVectors = serializedIndex.fieldVectors,\n 
invertedIndex = Object.create(null),\n serializedInvertedIndex = serializedIndex.invertedIndex,\n tokenSetBuilder = new lunr.TokenSet.Builder,\n pipeline = lunr.Pipeline.load(serializedIndex.pipeline)\n\n if (serializedIndex.version != lunr.version) {\n lunr.utils.warn(\"Version mismatch when loading serialised index. Current version of lunr '\" + lunr.version + \"' does not match serialized index '\" + serializedIndex.version + \"'\")\n }\n\n for (var i = 0; i < serializedVectors.length; i++) {\n var tuple = serializedVectors[i],\n ref = tuple[0],\n elements = tuple[1]\n\n fieldVectors[ref] = new lunr.Vector(elements)\n }\n\n for (var i = 0; i < serializedInvertedIndex.length; i++) {\n var tuple = serializedInvertedIndex[i],\n term = tuple[0],\n posting = tuple[1]\n\n tokenSetBuilder.insert(term)\n invertedIndex[term] = posting\n }\n\n tokenSetBuilder.finish()\n\n attrs.fields = serializedIndex.fields\n\n attrs.fieldVectors = fieldVectors\n attrs.invertedIndex = invertedIndex\n attrs.tokenSet = tokenSetBuilder.root\n attrs.pipeline = pipeline\n\n return new lunr.Index(attrs)\n}\n/*!\n * lunr.Builder\n * Copyright (C) 2020 Oliver Nightingale\n */\n\n/**\n * lunr.Builder performs indexing on a set of documents and\n * returns instances of lunr.Index ready for querying.\n *\n * All configuration of the index is done via the builder, the\n * fields to index, the document reference, the text processing\n * pipeline and document scoring parameters are all set on the\n * builder before indexing.\n *\n * @constructor\n * @property {string} _ref - Internal reference to the document reference field.\n * @property {string[]} _fields - Internal reference to the document fields to index.\n * @property {object} invertedIndex - The inverted index maps terms to document fields.\n * @property {object} documentTermFrequencies - Keeps track of document term frequencies.\n * @property {object} documentLengths - Keeps track of the length of documents added to the index.\n * @property 
{lunr.tokenizer} tokenizer - Function for splitting strings into tokens for indexing.\n * @property {lunr.Pipeline} pipeline - The pipeline performs text processing on tokens before indexing.\n * @property {lunr.Pipeline} searchPipeline - A pipeline for processing search terms before querying the index.\n * @property {number} documentCount - Keeps track of the total number of documents indexed.\n * @property {number} _b - A parameter to control field length normalization, setting this to 0 disabled normalization, 1 fully normalizes field lengths, the default value is 0.75.\n * @property {number} _k1 - A parameter to control how quickly an increase in term frequency results in term frequency saturation, the default value is 1.2.\n * @property {number} termIndex - A counter incremented for each unique term, used to identify a terms position in the vector space.\n * @property {array} metadataWhitelist - A list of metadata keys that have been whitelisted for entry in the index.\n */\nlunr.Builder = function () {\n this._ref = \"id\"\n this._fields = Object.create(null)\n this._documents = Object.create(null)\n this.invertedIndex = Object.create(null)\n this.fieldTermFrequencies = {}\n this.fieldLengths = {}\n this.tokenizer = lunr.tokenizer\n this.pipeline = new lunr.Pipeline\n this.searchPipeline = new lunr.Pipeline\n this.documentCount = 0\n this._b = 0.75\n this._k1 = 1.2\n this.termIndex = 0\n this.metadataWhitelist = []\n}\n\n/**\n * Sets the document field used as the document reference. Every document must have this field.\n * The type of this field in the document should be a string, if it is not a string it will be\n * coerced into a string by calling toString.\n *\n * The default ref is 'id'.\n *\n * The ref should _not_ be changed during indexing, it should be set before any documents are\n * added to the index. 
Changing it during indexing can lead to inconsistent results.\n *\n * @param {string} ref - The name of the reference field in the document.\n */\nlunr.Builder.prototype.ref = function (ref) {\n this._ref = ref\n}\n\n/**\n * A function that is used to extract a field from a document.\n *\n * Lunr expects a field to be at the top level of a document, if however the field\n * is deeply nested within a document an extractor function can be used to extract\n * the right field for indexing.\n *\n * @callback fieldExtractor\n * @param {object} doc - The document being added to the index.\n * @returns {?(string|object|object[])} obj - The object that will be indexed for this field.\n * @example Extracting a nested field\n * function (doc) { return doc.nested.field }\n */\n\n/**\n * Adds a field to the list of document fields that will be indexed. Every document being\n * indexed should have this field. Null values for this field in indexed documents will\n * not cause errors but will limit the chance of that document being retrieved by searches.\n *\n * All fields should be added before adding documents to the index. Adding fields after\n * a document has been indexed will have no effect on already indexed documents.\n *\n * Fields can be boosted at build time. This allows terms within that field to have more\n * importance when ranking search results. 
Use a field boost to specify that matches within\n * one field are more important than other fields.\n *\n * @param {string} fieldName - The name of a field to index in all documents.\n * @param {object} attributes - Optional attributes associated with this field.\n * @param {number} [attributes.boost=1] - Boost applied to all terms within this field.\n * @param {fieldExtractor} [attributes.extractor] - Function to extract a field from a document.\n * @throws {RangeError} fieldName cannot contain unsupported characters '/'\n */\nlunr.Builder.prototype.field = function (fieldName, attributes) {\n if (/\\//.test(fieldName)) {\n throw new RangeError (\"Field '\" + fieldName + \"' contains illegal character '/'\")\n }\n\n this._fields[fieldName] = attributes || {}\n}\n\n/**\n * A parameter to tune the amount of field length normalisation that is applied when\n * calculating relevance scores. A value of 0 will completely disable any normalisation\n * and a value of 1 will fully normalise field lengths. The default is 0.75. Values of b\n * will be clamped to the range 0 - 1.\n *\n * @param {number} number - The value to set for this tuning parameter.\n */\nlunr.Builder.prototype.b = function (number) {\n if (number < 0) {\n this._b = 0\n } else if (number > 1) {\n this._b = 1\n } else {\n this._b = number\n }\n}\n\n/**\n * A parameter that controls the speed at which a rise in term frequency results in term\n * frequency saturation. The default value is 1.2. 
Setting this to a higher value will give\n * slower saturation levels, a lower value will result in quicker saturation.\n *\n * @param {number} number - The value to set for this tuning parameter.\n */\nlunr.Builder.prototype.k1 = function (number) {\n this._k1 = number\n}\n\n/**\n * Adds a document to the index.\n *\n * Before adding fields to the index the index should have been fully setup, with the document\n * ref and all fields to index already having been specified.\n *\n * The document must have a field name as specified by the ref (by default this is 'id') and\n * it should have all fields defined for indexing, though null or undefined values will not\n * cause errors.\n *\n * Entire documents can be boosted at build time. Applying a boost to a document indicates that\n * this document should rank higher in search results than other documents.\n *\n * @param {object} doc - The document to add to the index.\n * @param {object} attributes - Optional attributes associated with this document.\n * @param {number} [attributes.boost=1] - Boost applied to all terms within this document.\n */\nlunr.Builder.prototype.add = function (doc, attributes) {\n var docRef = doc[this._ref],\n fields = Object.keys(this._fields)\n\n this._documents[docRef] = attributes || {}\n this.documentCount += 1\n\n for (var i = 0; i < fields.length; i++) {\n var fieldName = fields[i],\n extractor = this._fields[fieldName].extractor,\n field = extractor ? 
extractor(doc) : doc[fieldName],\n tokens = this.tokenizer(field, {\n fields: [fieldName]\n }),\n terms = this.pipeline.run(tokens),\n fieldRef = new lunr.FieldRef (docRef, fieldName),\n fieldTerms = Object.create(null)\n\n this.fieldTermFrequencies[fieldRef] = fieldTerms\n this.fieldLengths[fieldRef] = 0\n\n // store the length of this field for this document\n this.fieldLengths[fieldRef] += terms.length\n\n // calculate term frequencies for this field\n for (var j = 0; j < terms.length; j++) {\n var term = terms[j]\n\n if (fieldTerms[term] == undefined) {\n fieldTerms[term] = 0\n }\n\n fieldTerms[term] += 1\n\n // add to inverted index\n // create an initial posting if one doesn't exist\n if (this.invertedIndex[term] == undefined) {\n var posting = Object.create(null)\n posting[\"_index\"] = this.termIndex\n this.termIndex += 1\n\n for (var k = 0; k < fields.length; k++) {\n posting[fields[k]] = Object.create(null)\n }\n\n this.invertedIndex[term] = posting\n }\n\n // add an entry for this term/fieldName/docRef to the invertedIndex\n if (this.invertedIndex[term][fieldName][docRef] == undefined) {\n this.invertedIndex[term][fieldName][docRef] = Object.create(null)\n }\n\n // store all whitelisted metadata about this token in the\n // inverted index\n for (var l = 0; l < this.metadataWhitelist.length; l++) {\n var metadataKey = this.metadataWhitelist[l],\n metadata = term.metadata[metadataKey]\n\n if (this.invertedIndex[term][fieldName][docRef][metadataKey] == undefined) {\n this.invertedIndex[term][fieldName][docRef][metadataKey] = []\n }\n\n this.invertedIndex[term][fieldName][docRef][metadataKey].push(metadata)\n }\n }\n\n }\n}\n\n/**\n * Calculates the average document length for this index\n *\n * @private\n */\nlunr.Builder.prototype.calculateAverageFieldLengths = function () {\n\n var fieldRefs = Object.keys(this.fieldLengths),\n numberOfFields = fieldRefs.length,\n accumulator = {},\n documentsWithField = {}\n\n for (var i = 0; i < numberOfFields; i++) {\n 
var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]),\n field = fieldRef.fieldName\n\n documentsWithField[field] || (documentsWithField[field] = 0)\n documentsWithField[field] += 1\n\n accumulator[field] || (accumulator[field] = 0)\n accumulator[field] += this.fieldLengths[fieldRef]\n }\n\n var fields = Object.keys(this._fields)\n\n for (var i = 0; i < fields.length; i++) {\n var fieldName = fields[i]\n accumulator[fieldName] = accumulator[fieldName] / documentsWithField[fieldName]\n }\n\n this.averageFieldLength = accumulator\n}\n\n/**\n * Builds a vector space model of every document using lunr.Vector\n *\n * @private\n */\nlunr.Builder.prototype.createFieldVectors = function () {\n var fieldVectors = {},\n fieldRefs = Object.keys(this.fieldTermFrequencies),\n fieldRefsLength = fieldRefs.length,\n termIdfCache = Object.create(null)\n\n for (var i = 0; i < fieldRefsLength; i++) {\n var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]),\n fieldName = fieldRef.fieldName,\n fieldLength = this.fieldLengths[fieldRef],\n fieldVector = new lunr.Vector,\n termFrequencies = this.fieldTermFrequencies[fieldRef],\n terms = Object.keys(termFrequencies),\n termsLength = terms.length\n\n\n var fieldBoost = this._fields[fieldName].boost || 1,\n docBoost = this._documents[fieldRef.docRef].boost || 1\n\n for (var j = 0; j < termsLength; j++) {\n var term = terms[j],\n tf = termFrequencies[term],\n termIndex = this.invertedIndex[term]._index,\n idf, score, scoreWithPrecision\n\n if (termIdfCache[term] === undefined) {\n idf = lunr.idf(this.invertedIndex[term], this.documentCount)\n termIdfCache[term] = idf\n } else {\n idf = termIdfCache[term]\n }\n\n score = idf * ((this._k1 + 1) * tf) / (this._k1 * (1 - this._b + this._b * (fieldLength / this.averageFieldLength[fieldName])) + tf)\n score *= fieldBoost\n score *= docBoost\n scoreWithPrecision = Math.round(score * 1000) / 1000\n // Converts 1.23456789 to 1.234.\n // Reducing the precision so that the vectors take up less\n // space 
when serialised. Doing it now so that they behave\n // the same before and after serialisation. Also, this is\n // the fastest approach to reducing a number's precision in\n // JavaScript.\n\n fieldVector.insert(termIndex, scoreWithPrecision)\n }\n\n fieldVectors[fieldRef] = fieldVector\n }\n\n this.fieldVectors = fieldVectors\n}\n\n/**\n * Creates a token set of all tokens in the index using lunr.TokenSet\n *\n * @private\n */\nlunr.Builder.prototype.createTokenSet = function () {\n this.tokenSet = lunr.TokenSet.fromArray(\n Object.keys(this.invertedIndex).sort()\n )\n}\n\n/**\n * Builds the index, creating an instance of lunr.Index.\n *\n * This completes the indexing process and should only be called\n * once all documents have been added to the index.\n *\n * @returns {lunr.Index}\n */\nlunr.Builder.prototype.build = function () {\n this.calculateAverageFieldLengths()\n this.createFieldVectors()\n this.createTokenSet()\n\n return new lunr.Index({\n invertedIndex: this.invertedIndex,\n fieldVectors: this.fieldVectors,\n tokenSet: this.tokenSet,\n fields: Object.keys(this._fields),\n pipeline: this.searchPipeline\n })\n}\n\n/**\n * Applies a plugin to the index builder.\n *\n * A plugin is a function that is called with the index builder as its context.\n * Plugins can be used to customise or extend the behaviour of the index\n * in some way. A plugin is just a function, that encapsulated the custom\n * behaviour that should be applied when building the index.\n *\n * The plugin function will be called with the index builder as its argument, additional\n * arguments can also be passed when calling use. 
The function will be called\n * with the index builder as its context.\n *\n * @param {Function} plugin The plugin to apply.\n */\nlunr.Builder.prototype.use = function (fn) {\n var args = Array.prototype.slice.call(arguments, 1)\n args.unshift(this)\n fn.apply(this, args)\n}\n/**\n * Contains and collects metadata about a matching document.\n * A single instance of lunr.MatchData is returned as part of every\n * lunr.Index~Result.\n *\n * @constructor\n * @param {string} term - The term this match data is associated with\n * @param {string} field - The field in which the term was found\n * @param {object} metadata - The metadata recorded about this term in this field\n * @property {object} metadata - A cloned collection of metadata associated with this document.\n * @see {@link lunr.Index~Result}\n */\nlunr.MatchData = function (term, field, metadata) {\n var clonedMetadata = Object.create(null),\n metadataKeys = Object.keys(metadata || {})\n\n // Cloning the metadata to prevent the original\n // being mutated during match data combination.\n // Metadata is kept in an array within the inverted\n // index so cloning the data can be done with\n // Array#slice\n for (var i = 0; i < metadataKeys.length; i++) {\n var key = metadataKeys[i]\n clonedMetadata[key] = metadata[key].slice()\n }\n\n this.metadata = Object.create(null)\n\n if (term !== undefined) {\n this.metadata[term] = Object.create(null)\n this.metadata[term][field] = clonedMetadata\n }\n}\n\n/**\n * An instance of lunr.MatchData will be created for every term that matches a\n * document. However only one instance is required in a lunr.Index~Result. 
This\n * method combines metadata from another instance of lunr.MatchData with this\n * objects metadata.\n *\n * @param {lunr.MatchData} otherMatchData - Another instance of match data to merge with this one.\n * @see {@link lunr.Index~Result}\n */\nlunr.MatchData.prototype.combine = function (otherMatchData) {\n var terms = Object.keys(otherMatchData.metadata)\n\n for (var i = 0; i < terms.length; i++) {\n var term = terms[i],\n fields = Object.keys(otherMatchData.metadata[term])\n\n if (this.metadata[term] == undefined) {\n this.metadata[term] = Object.create(null)\n }\n\n for (var j = 0; j < fields.length; j++) {\n var field = fields[j],\n keys = Object.keys(otherMatchData.metadata[term][field])\n\n if (this.metadata[term][field] == undefined) {\n this.metadata[term][field] = Object.create(null)\n }\n\n for (var k = 0; k < keys.length; k++) {\n var key = keys[k]\n\n if (this.metadata[term][field][key] == undefined) {\n this.metadata[term][field][key] = otherMatchData.metadata[term][field][key]\n } else {\n this.metadata[term][field][key] = this.metadata[term][field][key].concat(otherMatchData.metadata[term][field][key])\n }\n\n }\n }\n }\n}\n\n/**\n * Add metadata for a term/field pair to this instance of match data.\n *\n * @param {string} term - The term this match data is associated with\n * @param {string} field - The field in which the term was found\n * @param {object} metadata - The metadata recorded about this term in this field\n */\nlunr.MatchData.prototype.add = function (term, field, metadata) {\n if (!(term in this.metadata)) {\n this.metadata[term] = Object.create(null)\n this.metadata[term][field] = metadata\n return\n }\n\n if (!(field in this.metadata[term])) {\n this.metadata[term][field] = metadata\n return\n }\n\n var metadataKeys = Object.keys(metadata)\n\n for (var i = 0; i < metadataKeys.length; i++) {\n var key = metadataKeys[i]\n\n if (key in this.metadata[term][field]) {\n this.metadata[term][field][key] = 
this.metadata[term][field][key].concat(metadata[key])\n } else {\n this.metadata[term][field][key] = metadata[key]\n }\n }\n}\n/**\n * A lunr.Query provides a programmatic way of defining queries to be performed\n * against a {@link lunr.Index}.\n *\n * Prefer constructing a lunr.Query using the {@link lunr.Index#query} method\n * so the query object is pre-initialized with the right index fields.\n *\n * @constructor\n * @property {lunr.Query~Clause[]} clauses - An array of query clauses.\n * @property {string[]} allFields - An array of all available fields in a lunr.Index.\n */\nlunr.Query = function (allFields) {\n this.clauses = []\n this.allFields = allFields\n}\n\n/**\n * Constants for indicating what kind of automatic wildcard insertion will be used when constructing a query clause.\n *\n * This allows wildcards to be added to the beginning and end of a term without having to manually do any string\n * concatenation.\n *\n * The wildcard constants can be bitwise combined to select both leading and trailing wildcards.\n *\n * @constant\n * @default\n * @property {number} wildcard.NONE - The term will have no wildcards inserted, this is the default behaviour\n * @property {number} wildcard.LEADING - Prepend the term with a wildcard, unless a leading wildcard already exists\n * @property {number} wildcard.TRAILING - Append a wildcard to the term, unless a trailing wildcard already exists\n * @see lunr.Query~Clause\n * @see lunr.Query#clause\n * @see lunr.Query#term\n * @example query term with trailing wildcard\n * query.term('foo', { wildcard: lunr.Query.wildcard.TRAILING })\n * @example query term with leading and trailing wildcard\n * query.term('foo', {\n * wildcard: lunr.Query.wildcard.LEADING | lunr.Query.wildcard.TRAILING\n * })\n */\n\nlunr.Query.wildcard = new String (\"*\")\nlunr.Query.wildcard.NONE = 0\nlunr.Query.wildcard.LEADING = 1\nlunr.Query.wildcard.TRAILING = 2\n\n/**\n * Constants for indicating what kind of presence a term must have in 
matching documents.\n *\n * @constant\n * @enum {number}\n * @see lunr.Query~Clause\n * @see lunr.Query#clause\n * @see lunr.Query#term\n * @example query term with required presence\n * query.term('foo', { presence: lunr.Query.presence.REQUIRED })\n */\nlunr.Query.presence = {\n /**\n * Term's presence in a document is optional, this is the default value.\n */\n OPTIONAL: 1,\n\n /**\n * Term's presence in a document is required, documents that do not contain\n * this term will not be returned.\n */\n REQUIRED: 2,\n\n /**\n * Term's presence in a document is prohibited, documents that do contain\n * this term will not be returned.\n */\n PROHIBITED: 3\n}\n\n/**\n * A single clause in a {@link lunr.Query} contains a term and details on how to\n * match that term against a {@link lunr.Index}.\n *\n * @typedef {Object} lunr.Query~Clause\n * @property {string[]} fields - The fields in an index this clause should be matched against.\n * @property {number} [boost=1] - Any boost that should be applied when matching this clause.\n * @property {number} [editDistance] - Whether the term should have fuzzy matching applied, and how fuzzy the match should be.\n * @property {boolean} [usePipeline] - Whether the term should be passed through the search pipeline.\n * @property {number} [wildcard=lunr.Query.wildcard.NONE] - Whether the term should have wildcards appended or prepended.\n * @property {number} [presence=lunr.Query.presence.OPTIONAL] - The terms presence in any matching documents.\n */\n\n/**\n * Adds a {@link lunr.Query~Clause} to this query.\n *\n * Unless the clause contains the fields to be matched all fields will be matched. 
In addition\n * a default boost of 1 is applied to the clause.\n *\n * @param {lunr.Query~Clause} clause - The clause to add to this query.\n * @see lunr.Query~Clause\n * @returns {lunr.Query}\n */\nlunr.Query.prototype.clause = function (clause) {\n if (!('fields' in clause)) {\n clause.fields = this.allFields\n }\n\n if (!('boost' in clause)) {\n clause.boost = 1\n }\n\n if (!('usePipeline' in clause)) {\n clause.usePipeline = true\n }\n\n if (!('wildcard' in clause)) {\n clause.wildcard = lunr.Query.wildcard.NONE\n }\n\n if ((clause.wildcard & lunr.Query.wildcard.LEADING) && (clause.term.charAt(0) != lunr.Query.wildcard)) {\n clause.term = \"*\" + clause.term\n }\n\n if ((clause.wildcard & lunr.Query.wildcard.TRAILING) && (clause.term.slice(-1) != lunr.Query.wildcard)) {\n clause.term = \"\" + clause.term + \"*\"\n }\n\n if (!('presence' in clause)) {\n clause.presence = lunr.Query.presence.OPTIONAL\n }\n\n this.clauses.push(clause)\n\n return this\n}\n\n/**\n * A negated query is one in which every clause has a presence of\n * prohibited. These queries require some special processing to return\n * the expected results.\n *\n * @returns boolean\n */\nlunr.Query.prototype.isNegated = function () {\n for (var i = 0; i < this.clauses.length; i++) {\n if (this.clauses[i].presence != lunr.Query.presence.PROHIBITED) {\n return false\n }\n }\n\n return true\n}\n\n/**\n * Adds a term to the current query, under the covers this will create a {@link lunr.Query~Clause}\n * to the list of clauses that make up this query.\n *\n * The term is used as is, i.e. no tokenization will be performed by this method. Instead conversion\n * to a token or token-like string should be done before calling this method.\n *\n * The term will be converted to a string by calling `toString`. 
Multiple terms can be passed as an\n * array, each term in the array will share the same options.\n *\n * @param {object|object[]} term - The term(s) to add to the query.\n * @param {object} [options] - Any additional properties to add to the query clause.\n * @returns {lunr.Query}\n * @see lunr.Query#clause\n * @see lunr.Query~Clause\n * @example adding a single term to a query\n * query.term(\"foo\")\n * @example adding a single term to a query and specifying search fields, term boost and automatic trailing wildcard\n * query.term(\"foo\", {\n * fields: [\"title\"],\n * boost: 10,\n * wildcard: lunr.Query.wildcard.TRAILING\n * })\n * @example using lunr.tokenizer to convert a string to tokens before using them as terms\n * query.term(lunr.tokenizer(\"foo bar\"))\n */\nlunr.Query.prototype.term = function (term, options) {\n if (Array.isArray(term)) {\n term.forEach(function (t) { this.term(t, lunr.utils.clone(options)) }, this)\n return this\n }\n\n var clause = options || {}\n clause.term = term.toString()\n\n this.clause(clause)\n\n return this\n}\nlunr.QueryParseError = function (message, start, end) {\n this.name = \"QueryParseError\"\n this.message = message\n this.start = start\n this.end = end\n}\n\nlunr.QueryParseError.prototype = new Error\nlunr.QueryLexer = function (str) {\n this.lexemes = []\n this.str = str\n this.length = str.length\n this.pos = 0\n this.start = 0\n this.escapeCharPositions = []\n}\n\nlunr.QueryLexer.prototype.run = function () {\n var state = lunr.QueryLexer.lexText\n\n while (state) {\n state = state(this)\n }\n}\n\nlunr.QueryLexer.prototype.sliceString = function () {\n var subSlices = [],\n sliceStart = this.start,\n sliceEnd = this.pos\n\n for (var i = 0; i < this.escapeCharPositions.length; i++) {\n sliceEnd = this.escapeCharPositions[i]\n subSlices.push(this.str.slice(sliceStart, sliceEnd))\n sliceStart = sliceEnd + 1\n }\n\n subSlices.push(this.str.slice(sliceStart, this.pos))\n this.escapeCharPositions.length = 0\n\n return 
subSlices.join('')\n}\n\nlunr.QueryLexer.prototype.emit = function (type) {\n this.lexemes.push({\n type: type,\n str: this.sliceString(),\n start: this.start,\n end: this.pos\n })\n\n this.start = this.pos\n}\n\nlunr.QueryLexer.prototype.escapeCharacter = function () {\n this.escapeCharPositions.push(this.pos - 1)\n this.pos += 1\n}\n\nlunr.QueryLexer.prototype.next = function () {\n if (this.pos >= this.length) {\n return lunr.QueryLexer.EOS\n }\n\n var char = this.str.charAt(this.pos)\n this.pos += 1\n return char\n}\n\nlunr.QueryLexer.prototype.width = function () {\n return this.pos - this.start\n}\n\nlunr.QueryLexer.prototype.ignore = function () {\n if (this.start == this.pos) {\n this.pos += 1\n }\n\n this.start = this.pos\n}\n\nlunr.QueryLexer.prototype.backup = function () {\n this.pos -= 1\n}\n\nlunr.QueryLexer.prototype.acceptDigitRun = function () {\n var char, charCode\n\n do {\n char = this.next()\n charCode = char.charCodeAt(0)\n } while (charCode > 47 && charCode < 58)\n\n if (char != lunr.QueryLexer.EOS) {\n this.backup()\n }\n}\n\nlunr.QueryLexer.prototype.more = function () {\n return this.pos < this.length\n}\n\nlunr.QueryLexer.EOS = 'EOS'\nlunr.QueryLexer.FIELD = 'FIELD'\nlunr.QueryLexer.TERM = 'TERM'\nlunr.QueryLexer.EDIT_DISTANCE = 'EDIT_DISTANCE'\nlunr.QueryLexer.BOOST = 'BOOST'\nlunr.QueryLexer.PRESENCE = 'PRESENCE'\n\nlunr.QueryLexer.lexField = function (lexer) {\n lexer.backup()\n lexer.emit(lunr.QueryLexer.FIELD)\n lexer.ignore()\n return lunr.QueryLexer.lexText\n}\n\nlunr.QueryLexer.lexTerm = function (lexer) {\n if (lexer.width() > 1) {\n lexer.backup()\n lexer.emit(lunr.QueryLexer.TERM)\n }\n\n lexer.ignore()\n\n if (lexer.more()) {\n return lunr.QueryLexer.lexText\n }\n}\n\nlunr.QueryLexer.lexEditDistance = function (lexer) {\n lexer.ignore()\n lexer.acceptDigitRun()\n lexer.emit(lunr.QueryLexer.EDIT_DISTANCE)\n return lunr.QueryLexer.lexText\n}\n\nlunr.QueryLexer.lexBoost = function (lexer) {\n lexer.ignore()\n 
lexer.acceptDigitRun()\n lexer.emit(lunr.QueryLexer.BOOST)\n return lunr.QueryLexer.lexText\n}\n\nlunr.QueryLexer.lexEOS = function (lexer) {\n if (lexer.width() > 0) {\n lexer.emit(lunr.QueryLexer.TERM)\n }\n}\n\n// This matches the separator used when tokenising fields\n// within a document. These should match otherwise it is\n// not possible to search for some tokens within a document.\n//\n// It is possible for the user to change the separator on the\n// tokenizer so it _might_ clash with any other of the special\n// characters already used within the search string, e.g. :.\n//\n// This means that it is possible to change the separator in\n// such a way that makes some words unsearchable using a search\n// string.\nlunr.QueryLexer.termSeparator = lunr.tokenizer.separator\n\nlunr.QueryLexer.lexText = function (lexer) {\n while (true) {\n var char = lexer.next()\n\n if (char == lunr.QueryLexer.EOS) {\n return lunr.QueryLexer.lexEOS\n }\n\n // Escape character is '\\'\n if (char.charCodeAt(0) == 92) {\n lexer.escapeCharacter()\n continue\n }\n\n if (char == \":\") {\n return lunr.QueryLexer.lexField\n }\n\n if (char == \"~\") {\n lexer.backup()\n if (lexer.width() > 0) {\n lexer.emit(lunr.QueryLexer.TERM)\n }\n return lunr.QueryLexer.lexEditDistance\n }\n\n if (char == \"^\") {\n lexer.backup()\n if (lexer.width() > 0) {\n lexer.emit(lunr.QueryLexer.TERM)\n }\n return lunr.QueryLexer.lexBoost\n }\n\n // \"+\" indicates term presence is required\n // checking for length to ensure that only\n // leading \"+\" are considered\n if (char == \"+\" && lexer.width() === 1) {\n lexer.emit(lunr.QueryLexer.PRESENCE)\n return lunr.QueryLexer.lexText\n }\n\n // \"-\" indicates term presence is prohibited\n // checking for length to ensure that only\n // leading \"-\" are considered\n if (char == \"-\" && lexer.width() === 1) {\n lexer.emit(lunr.QueryLexer.PRESENCE)\n return lunr.QueryLexer.lexText\n }\n\n if (char.match(lunr.QueryLexer.termSeparator)) {\n return 
lunr.QueryLexer.lexTerm\n }\n }\n}\n\nlunr.QueryParser = function (str, query) {\n this.lexer = new lunr.QueryLexer (str)\n this.query = query\n this.currentClause = {}\n this.lexemeIdx = 0\n}\n\nlunr.QueryParser.prototype.parse = function () {\n this.lexer.run()\n this.lexemes = this.lexer.lexemes\n\n var state = lunr.QueryParser.parseClause\n\n while (state) {\n state = state(this)\n }\n\n return this.query\n}\n\nlunr.QueryParser.prototype.peekLexeme = function () {\n return this.lexemes[this.lexemeIdx]\n}\n\nlunr.QueryParser.prototype.consumeLexeme = function () {\n var lexeme = this.peekLexeme()\n this.lexemeIdx += 1\n return lexeme\n}\n\nlunr.QueryParser.prototype.nextClause = function () {\n var completedClause = this.currentClause\n this.query.clause(completedClause)\n this.currentClause = {}\n}\n\nlunr.QueryParser.parseClause = function (parser) {\n var lexeme = parser.peekLexeme()\n\n if (lexeme == undefined) {\n return\n }\n\n switch (lexeme.type) {\n case lunr.QueryLexer.PRESENCE:\n return lunr.QueryParser.parsePresence\n case lunr.QueryLexer.FIELD:\n return lunr.QueryParser.parseField\n case lunr.QueryLexer.TERM:\n return lunr.QueryParser.parseTerm\n default:\n var errorMessage = \"expected either a field or a term, found \" + lexeme.type\n\n if (lexeme.str.length >= 1) {\n errorMessage += \" with value '\" + lexeme.str + \"'\"\n }\n\n throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)\n }\n}\n\nlunr.QueryParser.parsePresence = function (parser) {\n var lexeme = parser.consumeLexeme()\n\n if (lexeme == undefined) {\n return\n }\n\n switch (lexeme.str) {\n case \"-\":\n parser.currentClause.presence = lunr.Query.presence.PROHIBITED\n break\n case \"+\":\n parser.currentClause.presence = lunr.Query.presence.REQUIRED\n break\n default:\n var errorMessage = \"unrecognised presence operator'\" + lexeme.str + \"'\"\n throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)\n }\n\n var nextLexeme = parser.peekLexeme()\n\n if 
(nextLexeme == undefined) {\n var errorMessage = \"expecting term or field, found nothing\"\n throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)\n }\n\n switch (nextLexeme.type) {\n case lunr.QueryLexer.FIELD:\n return lunr.QueryParser.parseField\n case lunr.QueryLexer.TERM:\n return lunr.QueryParser.parseTerm\n default:\n var errorMessage = \"expecting term or field, found '\" + nextLexeme.type + \"'\"\n throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)\n }\n}\n\nlunr.QueryParser.parseField = function (parser) {\n var lexeme = parser.consumeLexeme()\n\n if (lexeme == undefined) {\n return\n }\n\n if (parser.query.allFields.indexOf(lexeme.str) == -1) {\n var possibleFields = parser.query.allFields.map(function (f) { return \"'\" + f + \"'\" }).join(', '),\n errorMessage = \"unrecognised field '\" + lexeme.str + \"', possible fields: \" + possibleFields\n\n throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)\n }\n\n parser.currentClause.fields = [lexeme.str]\n\n var nextLexeme = parser.peekLexeme()\n\n if (nextLexeme == undefined) {\n var errorMessage = \"expecting term, found nothing\"\n throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)\n }\n\n switch (nextLexeme.type) {\n case lunr.QueryLexer.TERM:\n return lunr.QueryParser.parseTerm\n default:\n var errorMessage = \"expecting term, found '\" + nextLexeme.type + \"'\"\n throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)\n }\n}\n\nlunr.QueryParser.parseTerm = function (parser) {\n var lexeme = parser.consumeLexeme()\n\n if (lexeme == undefined) {\n return\n }\n\n parser.currentClause.term = lexeme.str.toLowerCase()\n\n if (lexeme.str.indexOf(\"*\") != -1) {\n parser.currentClause.usePipeline = false\n }\n\n var nextLexeme = parser.peekLexeme()\n\n if (nextLexeme == undefined) {\n parser.nextClause()\n return\n }\n\n switch (nextLexeme.type) {\n case lunr.QueryLexer.TERM:\n parser.nextClause()\n 
return lunr.QueryParser.parseTerm\n case lunr.QueryLexer.FIELD:\n parser.nextClause()\n return lunr.QueryParser.parseField\n case lunr.QueryLexer.EDIT_DISTANCE:\n return lunr.QueryParser.parseEditDistance\n case lunr.QueryLexer.BOOST:\n return lunr.QueryParser.parseBoost\n case lunr.QueryLexer.PRESENCE:\n parser.nextClause()\n return lunr.QueryParser.parsePresence\n default:\n var errorMessage = \"Unexpected lexeme type '\" + nextLexeme.type + \"'\"\n throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)\n }\n}\n\nlunr.QueryParser.parseEditDistance = function (parser) {\n var lexeme = parser.consumeLexeme()\n\n if (lexeme == undefined) {\n return\n }\n\n var editDistance = parseInt(lexeme.str, 10)\n\n if (isNaN(editDistance)) {\n var errorMessage = \"edit distance must be numeric\"\n throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)\n }\n\n parser.currentClause.editDistance = editDistance\n\n var nextLexeme = parser.peekLexeme()\n\n if (nextLexeme == undefined) {\n parser.nextClause()\n return\n }\n\n switch (nextLexeme.type) {\n case lunr.QueryLexer.TERM:\n parser.nextClause()\n return lunr.QueryParser.parseTerm\n case lunr.QueryLexer.FIELD:\n parser.nextClause()\n return lunr.QueryParser.parseField\n case lunr.QueryLexer.EDIT_DISTANCE:\n return lunr.QueryParser.parseEditDistance\n case lunr.QueryLexer.BOOST:\n return lunr.QueryParser.parseBoost\n case lunr.QueryLexer.PRESENCE:\n parser.nextClause()\n return lunr.QueryParser.parsePresence\n default:\n var errorMessage = \"Unexpected lexeme type '\" + nextLexeme.type + \"'\"\n throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)\n }\n}\n\nlunr.QueryParser.parseBoost = function (parser) {\n var lexeme = parser.consumeLexeme()\n\n if (lexeme == undefined) {\n return\n }\n\n var boost = parseInt(lexeme.str, 10)\n\n if (isNaN(boost)) {\n var errorMessage = \"boost must be numeric\"\n throw new lunr.QueryParseError (errorMessage, lexeme.start, 
lexeme.end)\n }\n\n parser.currentClause.boost = boost\n\n var nextLexeme = parser.peekLexeme()\n\n if (nextLexeme == undefined) {\n parser.nextClause()\n return\n }\n\n switch (nextLexeme.type) {\n case lunr.QueryLexer.TERM:\n parser.nextClause()\n return lunr.QueryParser.parseTerm\n case lunr.QueryLexer.FIELD:\n parser.nextClause()\n return lunr.QueryParser.parseField\n case lunr.QueryLexer.EDIT_DISTANCE:\n return lunr.QueryParser.parseEditDistance\n case lunr.QueryLexer.BOOST:\n return lunr.QueryParser.parseBoost\n case lunr.QueryLexer.PRESENCE:\n parser.nextClause()\n return lunr.QueryParser.parsePresence\n default:\n var errorMessage = \"Unexpected lexeme type '\" + nextLexeme.type + \"'\"\n throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)\n }\n}\n\n /**\n * export the module via AMD, CommonJS or as a browser global\n * Export code from https://github.com/umdjs/umd/blob/master/returnExports.js\n */\n ;(function (root, factory) {\n if (typeof define === 'function' && define.amd) {\n // AMD. Register as an anonymous module.\n define(factory)\n } else if (typeof exports === 'object') {\n /**\n * Node. 
Does not work with strict CommonJS, but\n * only CommonJS-like enviroments that support module.exports,\n * like Node.\n */\n module.exports = factory()\n } else {\n // Browser globals (root is window)\n root.lunr = factory()\n }\n }(this, function () {\n /**\n * Just return a value to define the module export.\n * This example returns an object, but the module\n * can return a function as the exported value.\n */\n return lunr\n }))\n})();\n", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? 
html + str.substring(lastIndex, index)\n : html;\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A RTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport lunr from \"lunr\"\n\nimport \"~/polyfills\"\n\nimport { Search, SearchIndexConfig } from \"../../_\"\nimport {\n SearchMessage,\n SearchMessageType\n} from \"../message\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Add support for usage with `iframe-worker` polyfill\n *\n * While `importScripts` is synchronous when executed inside of a web worker,\n * it's not possible to provide a synchronous polyfilled implementation. 
The\n * cool thing is that awaiting a non-Promise is a noop, so extending the type\n * definition to return a `Promise` shouldn't break anything.\n *\n * @see https://bit.ly/2PjDnXi - GitHub comment\n */\ndeclare global {\n function importScripts(...urls: string[]): Promise | void\n}\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Search index\n */\nlet index: Search\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch (= import) multi-language support through `lunr-languages`\n *\n * This function automatically imports the stemmers necessary to process the\n * languages, which are defined through the search index configuration.\n *\n * If the worker runs inside of an `iframe` (when using `iframe-worker` as\n * a shim), the base URL for the stemmers to be loaded must be determined by\n * searching for the first `script` element with a `src` attribute, which will\n * contain the contents of this script.\n *\n * @param config - Search index configuration\n *\n * @returns Promise resolving with no result\n */\nasync function setupSearchLanguages(\n config: SearchIndexConfig\n): Promise {\n let base = \"../lunr\"\n\n /* Detect `iframe-worker` and fix base URL */\n if (typeof parent !== \"undefined\" && \"IFrameWorker\" in parent) {\n const worker = document.querySelector(\"script[src]\")!\n const [path] = worker.src.split(\"/worker\")\n\n /* Prefix base with path */\n base = base.replace(\"..\", path)\n }\n\n /* Add scripts for languages */\n const scripts = []\n for (const lang of config.lang) {\n switch (lang) {\n\n /* Add segmenter for Japanese */\n case \"ja\":\n scripts.push(`${base}/tinyseg.js`)\n break\n\n /* Add segmenter for Hindi and Thai */\n case \"hi\":\n case \"th\":\n 
scripts.push(`${base}/wordcut.js`)\n break\n }\n\n /* Add language support */\n if (lang !== \"en\")\n scripts.push(`${base}/min/lunr.${lang}.min.js`)\n }\n\n /* Add multi-language support */\n if (config.lang.length > 1)\n scripts.push(`${base}/min/lunr.multi.min.js`)\n\n /* Load scripts synchronously */\n if (scripts.length)\n await importScripts(\n `${base}/min/lunr.stemmer.support.min.js`,\n ...scripts\n )\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Message handler\n *\n * @param message - Source message\n *\n * @returns Target message\n */\nexport async function handler(\n message: SearchMessage\n): Promise {\n switch (message.type) {\n\n /* Search setup message */\n case SearchMessageType.SETUP:\n await setupSearchLanguages(message.data.config)\n index = new Search(message.data)\n return {\n type: SearchMessageType.READY\n }\n\n /* Search query message */\n case SearchMessageType.QUERY:\n return {\n type: SearchMessageType.RESULT,\n data: index ? 
index.search(message.data) : { items: [] }\n }\n\n /* All other messages */\n default:\n throw new TypeError(\"Invalid message type\")\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Worker\n * ------------------------------------------------------------------------- */\n\n/* @ts-expect-error - expose Lunr.js in global scope, or stemmers won't work */\nself.lunr = lunr\n\n/* Handle messages */\naddEventListener(\"message\", async ev => {\n postMessage(await handler(ev.data))\n})\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n/* ----------------------------------------------------------------------------\n * Polyfills\n * ------------------------------------------------------------------------- */\n\n/* Polyfill `Object.entries` */\nif (!Object.entries)\n Object.entries = function (obj: object) {\n const data: [string, string][] = []\n for (const key of Object.keys(obj))\n // @ts-expect-error - ignore property access warning\n data.push([key, obj[key]])\n\n /* Return entries */\n return data\n }\n\n/* Polyfill `Object.values` */\nif (!Object.values)\n Object.values = function (obj: object) {\n const data: string[] = []\n for (const key of Object.keys(obj))\n // @ts-expect-error - ignore property access warning\n data.push(obj[key])\n\n /* Return values */\n return data\n }\n\n/* ------------------------------------------------------------------------- */\n\n/* Polyfills for `Element` */\nif (typeof Element !== \"undefined\") {\n\n /* Polyfill `Element.scrollTo` */\n if (!Element.prototype.scrollTo)\n Element.prototype.scrollTo = function (\n x?: ScrollToOptions | number, y?: number\n ): void {\n if (typeof x === \"object\") {\n this.scrollLeft = x.left!\n this.scrollTop = x.top!\n } else {\n this.scrollLeft = x!\n this.scrollTop = y!\n }\n }\n\n /* Polyfill `Element.replaceWith` */\n if (!Element.prototype.replaceWith)\n Element.prototype.replaceWith = function (\n ...nodes: Array\n ): void {\n const parent = this.parentNode\n if (parent) {\n if (nodes.length === 0)\n parent.removeChild(this)\n\n /* Replace children and create text nodes */\n for (let i = nodes.length - 1; i >= 0; i--) {\n let node = nodes[i]\n if (typeof node !== \"object\")\n node = document.createTextNode(node)\n else if 
(node.parentNode)\n node.parentNode.removeChild(node)\n\n /* Replace child or insert before previous sibling */\n if (!i)\n parent.replaceChild(node, this)\n else\n parent.insertBefore(this.previousSibling!, node)\n }\n }\n }\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport escapeHTML from \"escape-html\"\n\nimport { SearchIndexDocument } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search document\n */\nexport interface SearchDocument extends SearchIndexDocument {\n parent?: SearchIndexDocument /* Parent article */\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Search document mapping\n */\nexport type SearchDocumentMap = Map\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Create a search document mapping\n *\n * @param docs - Search index documents\n *\n * @returns Search document map\n */\nexport function setupSearchDocumentMap(\n docs: SearchIndexDocument[]\n): SearchDocumentMap {\n const documents = new Map()\n const parents = new Set()\n for (const doc of docs) {\n const [path, hash] = doc.location.split(\"#\")\n\n /* Extract location and title */\n const location = doc.location\n const title = doc.title\n\n /* Escape and cleanup text */\n const text = escapeHTML(doc.text)\n .replace(/\\s+(?=[,.:;!?])/g, \"\")\n .replace(/\\s+/g, \" \")\n\n /* Handle section */\n if (hash) {\n const parent = documents.get(path)!\n\n /* Ignore first section, override article */\n if (!parents.has(parent)) {\n parent.title = doc.title\n parent.text = text\n\n /* Remember that we processed the article */\n parents.add(parent)\n\n /* Add subsequent section */\n } else {\n documents.set(location, {\n location,\n title,\n 
text,\n parent\n })\n }\n\n /* Add article */\n } else {\n documents.set(location, {\n location,\n title,\n text\n })\n }\n }\n return documents\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport escapeHTML from \"escape-html\"\n\nimport { SearchIndexConfig } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search highlight function\n *\n * @param value - Value\n *\n * @returns Highlighted value\n */\nexport type SearchHighlightFn = (value: string) => string\n\n/**\n * Search highlight factory function\n *\n * @param query - Query value\n *\n * @returns Search highlight function\n */\nexport type SearchHighlightFactoryFn = (query: string) => SearchHighlightFn\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Create a search highlighter\n *\n * @param config - Search index configuration\n * @param escape - Whether to escape HTML\n *\n * @returns Search highlight factory function\n */\nexport function setupSearchHighlighter(\n config: SearchIndexConfig, escape: boolean\n): SearchHighlightFactoryFn {\n const separator = new RegExp(config.separator, \"img\")\n const highlight = (_: unknown, data: string, term: string) => {\n return `${data}${term}`\n }\n\n /* Return factory function */\n return (query: string) => {\n query = query\n .replace(/[\\s*+\\-:~^]+/g, \" \")\n .trim()\n\n /* Create search term match expression */\n const match = new RegExp(`(^|${config.separator})(${\n query\n .replace(/[|\\\\{}()[\\]^$+*?.-]/g, \"\\\\$&\")\n .replace(separator, \"|\")\n })`, \"img\")\n\n /* Highlight string value */\n return value => (\n escape\n ? 
escapeHTML(value)\n : value\n )\n .replace(match, highlight)\n .replace(/<\\/mark>(\\s+)]*>/img, \"$1\")\n }\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search query clause\n */\nexport interface SearchQueryClause {\n presence: lunr.Query.presence /* Clause presence */\n term: string /* Clause term */\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Search query terms\n */\nexport type SearchQueryTerms = Record\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Parse a search query for analysis\n *\n * @param 
value - Query value\n *\n * @returns Search query clauses\n */\nexport function parseSearchQuery(\n value: string\n): SearchQueryClause[] {\n const query = new (lunr as any).Query([\"title\", \"text\"])\n const parser = new (lunr as any).QueryParser(value, query)\n\n /* Parse and return query clauses */\n parser.parse()\n return query.clauses\n}\n\n/**\n * Analyze the search query clauses in regard to the search terms found\n *\n * @param query - Search query clauses\n * @param terms - Search terms\n *\n * @returns Search query terms\n */\nexport function getSearchQueryTerms(\n query: SearchQueryClause[], terms: string[]\n): SearchQueryTerms {\n const clauses = new Set(query)\n\n /* Match query clauses against terms */\n const result: SearchQueryTerms = {}\n for (let t = 0; t < terms.length; t++)\n for (const clause of clauses)\n if (terms[t].startsWith(clause.term)) {\n result[clause.term] = true\n clauses.delete(clause)\n }\n\n /* Annotate unmatched non-stopword query clauses */\n for (const clause of clauses)\n if (lunr.stopWordFilter?.(clause.term as any))\n result[clause.term] = false\n\n /* Return query terms */\n return result\n}\n", "/*\n * Copyright (c) 2016-2022 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR 
PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n SearchDocument,\n SearchDocumentMap,\n setupSearchDocumentMap\n} from \"../document\"\nimport {\n SearchHighlightFactoryFn,\n setupSearchHighlighter\n} from \"../highlighter\"\nimport { SearchOptions } from \"../options\"\nimport {\n SearchQueryTerms,\n getSearchQueryTerms,\n parseSearchQuery\n} from \"../query\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search index configuration\n */\nexport interface SearchIndexConfig {\n lang: string[] /* Search languages */\n separator: string /* Search separator */\n}\n\n/**\n * Search index document\n */\nexport interface SearchIndexDocument {\n location: string /* Document location */\n title: string /* Document title */\n text: string /* Document text */\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Search index\n *\n * This interfaces describes the format of the `search_index.json` file which\n * is automatically built by the MkDocs search plugin.\n */\nexport interface SearchIndex {\n config: SearchIndexConfig /* Search index configuration */\n docs: SearchIndexDocument[] /* Search index documents */\n options: SearchOptions /* Search options */\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Search metadata\n */\nexport interface SearchMetadata {\n score: number /* Score (relevance) */\n terms: SearchQueryTerms /* Search query terms */\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Search result document\n 
*/\nexport type SearchResultDocument = SearchDocument & SearchMetadata\n\n/**\n * Search result item\n */\nexport type SearchResultItem = SearchResultDocument[]\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Search result\n */\nexport interface SearchResult {\n items: SearchResultItem[] /* Search result items */\n suggestions?: string[] /* Search suggestions */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Compute the difference of two lists of strings\n *\n * @param a - 1st list of strings\n * @param b - 2nd list of strings\n *\n * @returns Difference\n */\nfunction difference(a: string[], b: string[]): string[] {\n const [x, y] = [new Set(a), new Set(b)]\n return [\n ...new Set([...x].filter(value => !y.has(value)))\n ]\n}\n\n/* ----------------------------------------------------------------------------\n * Class\n * ------------------------------------------------------------------------- */\n\n/**\n * Search index\n */\nexport class Search {\n\n /**\n * Search document mapping\n *\n * A mapping of URLs (including hash fragments) to the actual articles and\n * sections of the documentation. 
The search document mapping must be created\n * regardless of whether the index was prebuilt or not, as Lunr.js itself\n * only stores the actual index.\n */\n protected documents: SearchDocumentMap\n\n /**\n * Search highlight factory function\n */\n protected highlight: SearchHighlightFactoryFn\n\n /**\n * The underlying Lunr.js search index\n */\n protected index: lunr.Index\n\n /**\n * Search options\n */\n protected options: SearchOptions\n\n /**\n * Create the search integration\n *\n * @param data - Search index\n */\n public constructor({ config, docs, options }: SearchIndex) {\n this.options = options\n\n /* Set up document map and highlighter factory */\n this.documents = setupSearchDocumentMap(docs)\n this.highlight = setupSearchHighlighter(config, false)\n\n /* Set separator for tokenizer */\n lunr.tokenizer.separator = new RegExp(config.separator)\n\n /* Create search index */\n this.index = lunr(function () {\n\n /* Set up multi-language support */\n if (config.lang.length === 1 && config.lang[0] !== \"en\") {\n this.use((lunr as any)[config.lang[0]])\n } else if (config.lang.length > 1) {\n this.use((lunr as any).multiLanguage(...config.lang))\n }\n\n /* Compute functions to be removed from the pipeline */\n const fns = difference([\n \"trimmer\", \"stopWordFilter\", \"stemmer\"\n ], options.pipeline)\n\n /* Remove functions from the pipeline for registered languages */\n for (const lang of config.lang.map(language => (\n language === \"en\" ? 
lunr : (lunr as any)[language]\n ))) {\n for (const fn of fns) {\n this.pipeline.remove(lang[fn])\n this.searchPipeline.remove(lang[fn])\n }\n }\n\n /* Set up reference */\n this.ref(\"location\")\n\n /* Set up fields */\n this.field(\"title\", { boost: 1e3 })\n this.field(\"text\")\n\n /* Index documents */\n for (const doc of docs)\n this.add(doc)\n })\n }\n\n /**\n * Search for matching documents\n *\n * The search index which MkDocs provides is divided up into articles, which\n * contain the whole content of the individual pages, and sections, which only\n * contain the contents of the subsections obtained by breaking the individual\n * pages up at `h1` ... `h6`. As there may be many sections on different pages\n * with identical titles (for example within this very project, e.g. \"Usage\"\n * or \"Installation\"), they need to be put into the context of the containing\n * page. For this reason, section results are grouped within their respective\n * articles which are the top-level results that are returned.\n *\n * @param query - Query value\n *\n * @returns Search results\n */\n public search(query: string): SearchResult {\n if (query) {\n try {\n const highlight = this.highlight(query)\n\n /* Parse query to extract clauses for analysis */\n const clauses = parseSearchQuery(query)\n .filter(clause => (\n clause.presence !== lunr.Query.presence.PROHIBITED\n ))\n\n /* Perform search and post-process results */\n const groups = this.index.search(`${query}*`)\n\n /* Apply post-query boosts based on title and search query terms */\n .reduce((item, { ref, score, matchData }) => {\n const document = this.documents.get(ref)\n if (typeof document !== \"undefined\") {\n const { location, title, text, parent } = document\n\n /* Compute and analyze search query terms */\n const terms = getSearchQueryTerms(\n clauses,\n Object.keys(matchData.metadata)\n )\n\n /* Highlight title and text and apply post-query boosts */\n const boost = +!parent + 
+Object.values(terms).every(t => t)\n item.push({\n location,\n title: highlight(title),\n text: highlight(text),\n score: score * (1 + boost),\n terms\n })\n }\n return item\n }, [])\n\n /* Sort search results again after applying boosts */\n .sort((a, b) => b.score - a.score)\n\n /* Group search results by page */\n .reduce((items, result) => {\n const document = this.documents.get(result.location)\n if (typeof document !== \"undefined\") {\n const ref = \"parent\" in document\n ? document.parent!.location\n : document.location\n items.set(ref, [...items.get(ref) || [], result])\n }\n return items\n }, new Map())\n\n /* Generate search suggestions, if desired */\n let suggestions: string[] | undefined\n if (this.options.suggestions) {\n const titles = this.index.query(builder => {\n for (const clause of clauses)\n builder.term(clause.term, {\n fields: [\"title\"],\n presence: lunr.Query.presence.REQUIRED,\n wildcard: lunr.Query.wildcard.TRAILING\n })\n })\n\n /* Retrieve suggestions for best match */\n suggestions = titles.length\n ? 
Object.keys(titles[0].matchData.metadata)\n : []\n }\n\n /* Return items and suggestions */\n return {\n items: [...groups.values()],\n ...typeof suggestions !== \"undefined\" && { suggestions }\n }\n\n /* Log errors to console (for now) */\n } catch {\n console.warn(`Invalid query: ${query} \u2013 see https://bit.ly/2s3ChXG`)\n }\n }\n\n /* Return nothing in case of error or empty query */\n return { items: [] }\n }\n}\n"], - "mappings": "kkCAAA;AAAA;AAAA;AAAA;AAAA,GAMC,AAAC,WAAU,CAiCZ,GAAI,GAAO,SAAU,EAAQ,CAC3B,GAAI,GAAU,GAAI,GAAK,QAEvB,SAAQ,SAAS,IACf,EAAK,QACL,EAAK,eACL,EAAK,SAGP,EAAQ,eAAe,IACrB,EAAK,SAGP,EAAO,KAAK,EAAS,GACd,EAAQ,SAGjB,EAAK,QAAU,QACf;AAAA;AAAA;AAAA,GASA,EAAK,MAAQ,GASb,EAAK,MAAM,KAAQ,SAAU,EAAQ,CAEnC,MAAO,UAAU,EAAS,CACxB,AAAI,EAAO,SAAW,QAAQ,MAC5B,QAAQ,KAAK,KAIhB,MAaH,EAAK,MAAM,SAAW,SAAU,EAAK,CACnC,MAAI,AAAkB,IAAQ,KACrB,GAEA,EAAI,YAoBf,EAAK,MAAM,MAAQ,SAAU,EAAK,CAChC,GAAI,GAAQ,KACV,MAAO,GAMT,OAHI,GAAQ,OAAO,OAAO,MACtB,EAAO,OAAO,KAAK,GAEd,EAAI,EAAG,EAAI,EAAK,OAAQ,IAAK,CACpC,GAAI,GAAM,EAAK,GACX,EAAM,EAAI,GAEd,GAAI,MAAM,QAAQ,GAAM,CACtB,EAAM,GAAO,EAAI,QACjB,SAGF,GAAI,MAAO,IAAQ,UACf,MAAO,IAAQ,UACf,MAAO,IAAQ,UAAW,CAC5B,EAAM,GAAO,EACb,SAGF,KAAM,IAAI,WAAU,yDAGtB,MAAO,IAET,EAAK,SAAW,SAAU,EAAQ,EAAW,EAAa,CACxD,KAAK,OAAS,EACd,KAAK,UAAY,EACjB,KAAK,aAAe,GAGtB,EAAK,SAAS,OAAS,IAEvB,EAAK,SAAS,WAAa,SAAU,EAAG,CACtC,GAAI,GAAI,EAAE,QAAQ,EAAK,SAAS,QAEhC,GAAI,IAAM,GACR,KAAM,6BAGR,GAAI,GAAW,EAAE,MAAM,EAAG,GACtB,EAAS,EAAE,MAAM,EAAI,GAEzB,MAAO,IAAI,GAAK,SAAU,EAAQ,EAAU,IAG9C,EAAK,SAAS,UAAU,SAAW,UAAY,CAC7C,MAAI,MAAK,cAAgB,MACvB,MAAK,aAAe,KAAK,UAAY,EAAK,SAAS,OAAS,KAAK,QAG5D,KAAK,cAEd;AAAA;AAAA;AAAA,GAUA,EAAK,IAAM,SAAU,EAAU,CAG7B,GAFA,KAAK,SAAW,OAAO,OAAO,MAE1B,EAAU,CACZ,KAAK,OAAS,EAAS,OAEvB,OAAS,GAAI,EAAG,EAAI,KAAK,OAAQ,IAC/B,KAAK,SAAS,EAAS,IAAM,OAG/B,MAAK,OAAS,GAWlB,EAAK,IAAI,SAAW,CAClB,UAAW,SAAU,EAAO,CAC1B,MAAO,IAGT,MAAO,UAAY,CACjB,MAAO,OAGT,SAAU,UAAY,CACpB,MAAO,KAWX,EAAK,IAAI,MAAQ,CACf,UAAW,UAAY,CACrB,MAAO,OAGT,MAAO,SAAU,EAAO,CACtB,MAAO,IAGT,SAAU,UAAY,CACpB,MAAO,KAUX,EAAK,IAAI,U
AAU,SAAW,SAAU,EAAQ,CAC9C,MAAO,CAAC,CAAC,KAAK,SAAS,IAWzB,EAAK,IAAI,UAAU,UAAY,SAAU,EAAO,CAC9C,GAAI,GAAG,EAAG,EAAU,EAAe,GAEnC,GAAI,IAAU,EAAK,IAAI,SACrB,MAAO,MAGT,GAAI,IAAU,EAAK,IAAI,MACrB,MAAO,GAGT,AAAI,KAAK,OAAS,EAAM,OACtB,GAAI,KACJ,EAAI,GAEJ,GAAI,EACJ,EAAI,MAGN,EAAW,OAAO,KAAK,EAAE,UAEzB,OAAS,GAAI,EAAG,EAAI,EAAS,OAAQ,IAAK,CACxC,GAAI,GAAU,EAAS,GACvB,AAAI,IAAW,GAAE,UACf,EAAa,KAAK,GAItB,MAAO,IAAI,GAAK,IAAK,IAUvB,EAAK,IAAI,UAAU,MAAQ,SAAU,EAAO,CAC1C,MAAI,KAAU,EAAK,IAAI,SACd,EAAK,IAAI,SAGd,IAAU,EAAK,IAAI,MACd,KAGF,GAAI,GAAK,IAAI,OAAO,KAAK,KAAK,UAAU,OAAO,OAAO,KAAK,EAAM,aAU1E,EAAK,IAAM,SAAU,EAAS,EAAe,CAC3C,GAAI,GAAoB,EAExB,OAAS,KAAa,GACpB,AAAI,GAAa,UACjB,IAAqB,OAAO,KAAK,EAAQ,IAAY,QAGvD,GAAI,GAAK,GAAgB,EAAoB,IAAQ,GAAoB,IAEzE,MAAO,MAAK,IAAI,EAAI,KAAK,IAAI,KAW/B,EAAK,MAAQ,SAAU,EAAK,EAAU,CACpC,KAAK,IAAM,GAAO,GAClB,KAAK,SAAW,GAAY,IAQ9B,EAAK,MAAM,UAAU,SAAW,UAAY,CAC1C,MAAO,MAAK,KAuBd,EAAK,MAAM,UAAU,OAAS,SAAU,EAAI,CAC1C,YAAK,IAAM,EAAG,KAAK,IAAK,KAAK,UACtB,MAUT,EAAK,MAAM,UAAU,MAAQ,SAAU,EAAI,CACzC,SAAK,GAAM,SAAU,EAAG,CAAE,MAAO,IAC1B,GAAI,GAAK,MAAO,EAAG,KAAK,IAAK,KAAK,UAAW,KAAK,WAE3D;AAAA;AAAA;AAAA,GAuBA,EAAK,UAAY,SAAU,EAAK,EAAU,CACxC,GAAI,GAAO,MAAQ,GAAO,KACxB,MAAO,GAGT,GAAI,MAAM,QAAQ,GAChB,MAAO,GAAI,IAAI,SAAU,EAAG,CAC1B,MAAO,IAAI,GAAK,MACd,EAAK,MAAM,SAAS,GAAG,cACvB,EAAK,MAAM,MAAM,MASvB,OAJI,GAAM,EAAI,WAAW,cACrB,EAAM,EAAI,OACV,EAAS,GAEJ,EAAW,EAAG,EAAa,EAAG,GAAY,EAAK,IAAY,CAClE,GAAI,GAAO,EAAI,OAAO,GAClB,EAAc,EAAW,EAE7B,GAAK,EAAK,MAAM,EAAK,UAAU,YAAc,GAAY,EAAM,CAE7D,GAAI,EAAc,EAAG,CACnB,GAAI,GAAgB,EAAK,MAAM,MAAM,IAAa,GAClD,EAAc,SAAc,CAAC,EAAY,GACzC,EAAc,MAAW,EAAO,OAEhC,EAAO,KACL,GAAI,GAAK,MACP,EAAI,MAAM,EAAY,GACtB,IAKN,EAAa,EAAW,GAK5B,MAAO,IAUT,EAAK,UAAU,UAAY,UAC3B;AAAA;AAAA;AAAA,GAkCA,EAAK,SAAW,UAAY,CAC1B,KAAK,OAAS,IAGhB,EAAK,SAAS,oBAAsB,OAAO,OAAO,MAmClD,EAAK,SAAS,iBAAmB,SAAU,EAAI,EAAO,CACpD,AAAI,IAAS,MAAK,qBAChB,EAAK,MAAM,KAAK,6CAA+C,GAGjE,EAAG,MAAQ,EACX,EAAK,SAAS,oBAAoB,EAAG,OAAS,GAShD,EAAK,SAAS,4BAA8B,SAAU,EAAI,CACxD,GAAI,GAAe,EAAG,OAAU,EAAG,QAAS,MAAK,oBAEjD,AAAK,GACH,EAAK,MAAM,KAAK;AA
AA,EAAmG,IAcvH,EAAK,SAAS,KAAO,SAAU,EAAY,CACzC,GAAI,GAAW,GAAI,GAAK,SAExB,SAAW,QAAQ,SAAU,EAAQ,CACnC,GAAI,GAAK,EAAK,SAAS,oBAAoB,GAE3C,GAAI,EACF,EAAS,IAAI,OAEb,MAAM,IAAI,OAAM,sCAAwC,KAIrD,GAUT,EAAK,SAAS,UAAU,IAAM,UAAY,CACxC,GAAI,GAAM,MAAM,UAAU,MAAM,KAAK,WAErC,EAAI,QAAQ,SAAU,EAAI,CACxB,EAAK,SAAS,4BAA4B,GAC1C,KAAK,OAAO,KAAK,IAChB,OAYL,EAAK,SAAS,UAAU,MAAQ,SAAU,EAAY,EAAO,CAC3D,EAAK,SAAS,4BAA4B,GAE1C,GAAI,GAAM,KAAK,OAAO,QAAQ,GAC9B,GAAI,GAAO,GACT,KAAM,IAAI,OAAM,0BAGlB,EAAM,EAAM,EACZ,KAAK,OAAO,OAAO,EAAK,EAAG,IAY7B,EAAK,SAAS,UAAU,OAAS,SAAU,EAAY,EAAO,CAC5D,EAAK,SAAS,4BAA4B,GAE1C,GAAI,GAAM,KAAK,OAAO,QAAQ,GAC9B,GAAI,GAAO,GACT,KAAM,IAAI,OAAM,0BAGlB,KAAK,OAAO,OAAO,EAAK,EAAG,IAQ7B,EAAK,SAAS,UAAU,OAAS,SAAU,EAAI,CAC7C,GAAI,GAAM,KAAK,OAAO,QAAQ,GAC9B,AAAI,GAAO,IAIX,KAAK,OAAO,OAAO,EAAK,IAU1B,EAAK,SAAS,UAAU,IAAM,SAAU,EAAQ,CAG9C,OAFI,GAAc,KAAK,OAAO,OAErB,EAAI,EAAG,EAAI,EAAa,IAAK,CAIpC,OAHI,GAAK,KAAK,OAAO,GACjB,EAAO,GAEF,EAAI,EAAG,EAAI,EAAO,OAAQ,IAAK,CACtC,GAAI,GAAS,EAAG,EAAO,GAAI,EAAG,GAE9B,GAAI,KAAW,MAA6B,IAAW,IAEvD,GAAI,MAAM,QAAQ,GAChB,OAAS,GAAI,EAAG,EAAI,EAAO,OAAQ,IACjC,EAAK,KAAK,EAAO,QAGnB,GAAK,KAAK,GAId,EAAS,EAGX,MAAO,IAaT,EAAK,SAAS,UAAU,UAAY,SAAU,EAAK,EAAU,CAC3D,GAAI,GAAQ,GAAI,GAAK,MAAO,EAAK,GAEjC,MAAO,MAAK,IAAI,CAAC,IAAQ,IAAI,SAAU,EAAG,CACxC,MAAO,GAAE,cAQb,EAAK,SAAS,UAAU,MAAQ,UAAY,CAC1C,KAAK,OAAS,IAUhB,EAAK,SAAS,UAAU,OAAS,UAAY,CAC3C,MAAO,MAAK,OAAO,IAAI,SAAU,EAAI,CACnC,SAAK,SAAS,4BAA4B,GAEnC,EAAG,SAGd;AAAA;AAAA;AAAA,GAqBA,EAAK,OAAS,SAAU,EAAU,CAChC,KAAK,WAAa,EAClB,KAAK,SAAW,GAAY,IAc9B,EAAK,OAAO,UAAU,iBAAmB,SAAU,EAAO,CAExD,GAAI,KAAK,SAAS,QAAU,EAC1B,MAAO,GAST,OANI,GAAQ,EACR,EAAM,KAAK,SAAS,OAAS,EAC7B,EAAc,EAAM,EACpB,EAAa,KAAK,MAAM,EAAc,GACtC,EAAa,KAAK,SAAS,EAAa,GAErC,EAAc,GACf,GAAa,GACf,GAAQ,GAGN,EAAa,GACf,GAAM,GAGJ,GAAc,IAIlB,EAAc,EAAM,EACpB,EAAa,EAAQ,KAAK,MAAM,EAAc,GAC9C,EAAa,KAAK,SAAS,EAAa,GAO1C,GAJI,GAAc,GAId,EAAa,EACf,MAAO,GAAa,EAGtB,GAAI,EAAa,EACf,MAAQ,GAAa,GAAK,GAa9B,EAAK,OAAO,UAAU,OAAS,SAAU,EAAW,EAAK,CACvD,KAAK,OAAO,EAAW,EAAK,UAAY,CACtC,KAAM,qBAYV,EAAK,OAAO,UAAU,OAAS,SAAU,EAAW,EAA
K,EAAI,CAC3D,KAAK,WAAa,EAClB,GAAI,GAAW,KAAK,iBAAiB,GAErC,AAAI,KAAK,SAAS,IAAa,EAC7B,KAAK,SAAS,EAAW,GAAK,EAAG,KAAK,SAAS,EAAW,GAAI,GAE9D,KAAK,SAAS,OAAO,EAAU,EAAG,EAAW,IASjD,EAAK,OAAO,UAAU,UAAY,UAAY,CAC5C,GAAI,KAAK,WAAY,MAAO,MAAK,WAKjC,OAHI,GAAe,EACf,EAAiB,KAAK,SAAS,OAE1B,EAAI,EAAG,EAAI,EAAgB,GAAK,EAAG,CAC1C,GAAI,GAAM,KAAK,SAAS,GACxB,GAAgB,EAAM,EAGxB,MAAO,MAAK,WAAa,KAAK,KAAK,IASrC,EAAK,OAAO,UAAU,IAAM,SAAU,EAAa,CAOjD,OANI,GAAa,EACb,EAAI,KAAK,SAAU,EAAI,EAAY,SACnC,EAAO,EAAE,OAAQ,EAAO,EAAE,OAC1B,EAAO,EAAG,EAAO,EACjB,EAAI,EAAG,EAAI,EAER,EAAI,GAAQ,EAAI,GACrB,EAAO,EAAE,GAAI,EAAO,EAAE,GACtB,AAAI,EAAO,EACT,GAAK,EACA,AAAI,EAAO,EAChB,GAAK,EACI,GAAQ,GACjB,IAAc,EAAE,EAAI,GAAK,EAAE,EAAI,GAC/B,GAAK,EACL,GAAK,GAIT,MAAO,IAUT,EAAK,OAAO,UAAU,WAAa,SAAU,EAAa,CACxD,MAAO,MAAK,IAAI,GAAe,KAAK,aAAe,GAQrD,EAAK,OAAO,UAAU,QAAU,UAAY,CAG1C,OAFI,GAAS,GAAI,OAAO,KAAK,SAAS,OAAS,GAEtC,EAAI,EAAG,EAAI,EAAG,EAAI,KAAK,SAAS,OAAQ,GAAK,EAAG,IACvD,EAAO,GAAK,KAAK,SAAS,GAG5B,MAAO,IAQT,EAAK,OAAO,UAAU,OAAS,UAAY,CACzC,MAAO,MAAK,UAGd;AAAA;AAAA;AAAA;AAAA,GAiBA,EAAK,QAAW,UAAU,CACxB,GAAI,GAAY,CACZ,QAAY,MACZ,OAAW,OACX,KAAS,OACT,KAAS,OACT,KAAS,MACT,IAAQ,MACR,KAAS,KACT,MAAU,MACV,IAAQ,IACR,MAAU,MACV,QAAY,MACZ,MAAU,MACV,KAAS,MACT,MAAU,KACV,QAAY,MACZ,QAAY,MACZ,QAAY,MACZ,MAAU,KACV,MAAU,MACV,OAAW,MACX,KAAS,OAGX,EAAY,CACV,MAAU,KACV,MAAU,GACV,MAAU,KACV,MAAU,KACV,KAAS,KACT,IAAQ,GACR,KAAS,IAGX,EAAI,WACJ,EAAI,WACJ,EAAI,EAAI,aACR,EAAI,EAAI,WAER,EAAO,KAAO,EAAI,KAAO,EAAI,EAC7B,EAAO,KAAO,EAAI,KAAO,EAAI,EAAI,IAAM,EAAI,MAC3C,EAAO,KAAO,EAAI,KAAO,EAAI,EAAI,EAAI,EACrC,EAAM,KAAO,EAAI,KAAO,EAEtB,EAAU,GAAI,QAAO,GACrB,EAAU,GAAI,QAAO,GACrB,EAAU,GAAI,QAAO,GACrB,EAAS,GAAI,QAAO,GAEpB,EAAQ,kBACR,EAAS,iBACT,EAAQ,aACR,EAAS,kBACT,EAAU,KACV,EAAW,cACX,EAAW,GAAI,QAAO,sBACtB,EAAW,GAAI,QAAO,IAAM,EAAI,EAAI,gBAEpC,EAAQ,mBACR,EAAO,2IAEP,EAAO,iDAEP,EAAO,sFACP,EAAQ,oBAER,EAAO,WACP,EAAS,MACT,EAAQ,GAAI,QAAO,IAAM,EAAI,EAAI,gBAEjC,EAAgB,SAAuB,EAAG,CAC5C,GAAI,GACF,EACA,EACA,EACA,EACA,EACA,EAEF,GAAI,EAAE,OAAS,EAAK,MAAO,GAiB3B,GAfA,EAAU,EAAE,OAAO,EAAE,GACjB,GAAW,KACb,GAAI
,EAAQ,cAAgB,EAAE,OAAO,IAIvC,EAAK,EACL,EAAM,EAEN,AAAI,EAAG,KAAK,GAAM,EAAI,EAAE,QAAQ,EAAG,QAC1B,EAAI,KAAK,IAAM,GAAI,EAAE,QAAQ,EAAI,SAG1C,EAAK,EACL,EAAM,EACF,EAAG,KAAK,GAAI,CACd,GAAI,GAAK,EAAG,KAAK,GACjB,EAAK,EACD,EAAG,KAAK,EAAG,KACb,GAAK,EACL,EAAI,EAAE,QAAQ,EAAG,aAEV,EAAI,KAAK,GAAI,CACtB,GAAI,GAAK,EAAI,KAAK,GAClB,EAAO,EAAG,GACV,EAAM,EACF,EAAI,KAAK,IACX,GAAI,EACJ,EAAM,EACN,EAAM,EACN,EAAM,EACN,AAAI,EAAI,KAAK,GAAM,EAAI,EAAI,IACtB,AAAI,EAAI,KAAK,GAAM,GAAK,EAAS,EAAI,EAAE,QAAQ,EAAG,KAC9C,EAAI,KAAK,IAAM,GAAI,EAAI,MAMpC,GADA,EAAK,EACD,EAAG,KAAK,GAAI,CACd,GAAI,GAAK,EAAG,KAAK,GACjB,EAAO,EAAG,GACV,EAAI,EAAO,IAKb,GADA,EAAK,EACD,EAAG,KAAK,GAAI,CACd,GAAI,GAAK,EAAG,KAAK,GACjB,EAAO,EAAG,GACV,EAAS,EAAG,GACZ,EAAK,EACD,EAAG,KAAK,IACV,GAAI,EAAO,EAAU,IAMzB,GADA,EAAK,EACD,EAAG,KAAK,GAAI,CACd,GAAI,GAAK,EAAG,KAAK,GACjB,EAAO,EAAG,GACV,EAAS,EAAG,GACZ,EAAK,EACD,EAAG,KAAK,IACV,GAAI,EAAO,EAAU,IAOzB,GAFA,EAAK,EACL,EAAM,EACF,EAAG,KAAK,GAAI,CACd,GAAI,GAAK,EAAG,KAAK,GACjB,EAAO,EAAG,GACV,EAAK,EACD,EAAG,KAAK,IACV,GAAI,WAEG,EAAI,KAAK,GAAI,CACtB,GAAI,GAAK,EAAI,KAAK,GAClB,EAAO,EAAG,GAAK,EAAG,GAClB,EAAM,EACF,EAAI,KAAK,IACX,GAAI,GAMR,GADA,EAAK,EACD,EAAG,KAAK,GAAI,CACd,GAAI,GAAK,EAAG,KAAK,GACjB,EAAO,EAAG,GACV,EAAK,EACL,EAAM,EACN,EAAM,EACF,GAAG,KAAK,IAAU,EAAI,KAAK,IAAS,CAAE,EAAI,KAAK,KACjD,GAAI,GAIR,SAAK,EACL,EAAM,EACF,EAAG,KAAK,IAAM,EAAI,KAAK,IACzB,GAAK,EACL,EAAI,EAAE,QAAQ,EAAG,KAKf,GAAW,KACb,GAAI,EAAQ,cAAgB,EAAE,OAAO,IAGhC,GAGT,MAAO,UAAU,EAAO,CACtB,MAAO,GAAM,OAAO,OAIxB,EAAK,SAAS,iBAAiB,EAAK,QAAS,WAC7C;AAAA;AAAA;AAAA,GAkBA,EAAK,uBAAyB,SAAU,EAAW,CACjD,GAAI,GAAQ,EAAU,OAAO,SAAU,EAAM,EAAU,CACrD,SAAK,GAAY,EACV,GACN,IAEH,MAAO,UAAU,EAAO,CACtB,GAAI,GAAS,EAAM,EAAM,cAAgB,EAAM,WAAY,MAAO,KAiBtE,EAAK,eAAiB,EAAK,uBAAuB,CAChD,IACA,OACA,QACA,SACA,QACA,MACA,SACA,OACA,KACA,QACA,KACA,MACA,MACA,MACA,KACA,KACA,KACA,UACA,OACA,MACA,KACA,MACA,SACA,QACA,OACA,MACA,KACA,OACA,SACA,OACA,OACA,QACA,MACA,OACA,MACA,MACA,MACA,MACA,OACA,KACA,MACA,OACA,MACA,MACA,MACA,UACA,IACA,KACA,KACA,OACA,KACA,KACA,MACA,OACA,QACA,MACA,OACA,SACA,MACA,KACA,QA
CA,OACA,OACA,KACA,UACA,KACA,MACA,MACA,KACA,MACA,QACA,KACA,OACA,KACA,QACA,MACA,MACA,SACA,OACA,MACA,OACA,MACA,SACA,QACA,KACA,OACA,OACA,OACA,MACA,QACA,OACA,OACA,QACA,QACA,OACA,OACA,MACA,KACA,MACA,OACA,KACA,QACA,MACA,KACA,OACA,OACA,OACA,QACA,QACA,QACA,MACA,OACA,MACA,OACA,OACA,QACA,MACA,MACA,SAGF,EAAK,SAAS,iBAAiB,EAAK,eAAgB,kBACpD;AAAA;AAAA;AAAA,GAoBA,EAAK,QAAU,SAAU,EAAO,CAC9B,MAAO,GAAM,OAAO,SAAU,EAAG,CAC/B,MAAO,GAAE,QAAQ,OAAQ,IAAI,QAAQ,OAAQ,OAIjD,EAAK,SAAS,iBAAiB,EAAK,QAAS,WAC7C;AAAA;AAAA;AAAA,GA0BA,EAAK,SAAW,UAAY,CAC1B,KAAK,MAAQ,GACb,KAAK,MAAQ,GACb,KAAK,GAAK,EAAK,SAAS,QACxB,EAAK,SAAS,SAAW,GAW3B,EAAK,SAAS,QAAU,EASxB,EAAK,SAAS,UAAY,SAAU,EAAK,CAGvC,OAFI,GAAU,GAAI,GAAK,SAAS,QAEvB,EAAI,EAAG,EAAM,EAAI,OAAQ,EAAI,EAAK,IACzC,EAAQ,OAAO,EAAI,IAGrB,SAAQ,SACD,EAAQ,MAYjB,EAAK,SAAS,WAAa,SAAU,EAAQ,CAC3C,MAAI,gBAAkB,GACb,EAAK,SAAS,gBAAgB,EAAO,KAAM,EAAO,cAElD,EAAK,SAAS,WAAW,EAAO,OAmB3C,EAAK,SAAS,gBAAkB,SAAU,EAAK,EAAc,CAS3D,OARI,GAAO,GAAI,GAAK,SAEhB,EAAQ,CAAC,CACX,KAAM,EACN,eAAgB,EAChB,IAAK,IAGA,EAAM,QAAQ,CACnB,GAAI,GAAQ,EAAM,MAGlB,GAAI,EAAM,IAAI,OAAS,EAAG,CACxB,GAAI,GAAO,EAAM,IAAI,OAAO,GACxB,EAEJ,AAAI,IAAQ,GAAM,KAAK,MACrB,EAAa,EAAM,KAAK,MAAM,GAE9B,GAAa,GAAI,GAAK,SACtB,EAAM,KAAK,MAAM,GAAQ,GAGvB,EAAM,IAAI,QAAU,GACtB,GAAW,MAAQ,IAGrB,EAAM,KAAK,CACT,KAAM,EACN,eAAgB,EAAM,eACtB,IAAK,EAAM,IAAI,MAAM,KAIzB,GAAI,EAAM,gBAAkB,EAK5B,IAAI,KAAO,GAAM,KAAK,MACpB,GAAI,GAAgB,EAAM,KAAK,MAAM,SAChC,CACL,GAAI,GAAgB,GAAI,GAAK,SAC7B,EAAM,KAAK,MAAM,KAAO,EAiC1B,GA9BI,EAAM,IAAI,QAAU,GACtB,GAAc,MAAQ,IAGxB,EAAM,KAAK,CACT,KAAM,EACN,eAAgB,EAAM,eAAiB,EACvC,IAAK,EAAM,MAMT,EAAM,IAAI,OAAS,GACrB,EAAM,KAAK,CACT,KAAM,EAAM,KACZ,eAAgB,EAAM,eAAiB,EACvC,IAAK,EAAM,IAAI,MAAM,KAMrB,EAAM,IAAI,QAAU,GACtB,GAAM,KAAK,MAAQ,IAMjB,EAAM,IAAI,QAAU,EAAG,CACzB,GAAI,KAAO,GAAM,KAAK,MACpB,GAAI,GAAmB,EAAM,KAAK,MAAM,SACnC,CACL,GAAI,GAAmB,GAAI,GAAK,SAChC,EAAM,KAAK,MAAM,KAAO,EAG1B,AAAI,EAAM,IAAI,QAAU,GACtB,GAAiB,MAAQ,IAG3B,EAAM,KAAK,CACT,KAAM,EACN,eAAgB,EAAM,eAAiB,EACvC,IAAK,EAAM,IAAI,MAAM,KAOzB,GAAI,EAAM,IAAI,OAAS,EAAG,CACxB,GAAI,GAAQ,EAAM,IAAI,OAAO,
GACzB,EAAQ,EAAM,IAAI,OAAO,GACzB,EAEJ,AAAI,IAAS,GAAM,KAAK,MACtB,EAAgB,EAAM,KAAK,MAAM,GAEjC,GAAgB,GAAI,GAAK,SACzB,EAAM,KAAK,MAAM,GAAS,GAGxB,EAAM,IAAI,QAAU,GACtB,GAAc,MAAQ,IAGxB,EAAM,KAAK,CACT,KAAM,EACN,eAAgB,EAAM,eAAiB,EACvC,IAAK,EAAQ,EAAM,IAAI,MAAM,OAKnC,MAAO,IAaT,EAAK,SAAS,WAAa,SAAU,EAAK,CAYxC,OAXI,GAAO,GAAI,GAAK,SAChB,EAAO,EAUF,EAAI,EAAG,EAAM,EAAI,OAAQ,EAAI,EAAK,IAAK,CAC9C,GAAI,GAAO,EAAI,GACX,EAAS,GAAK,EAAM,EAExB,GAAI,GAAQ,IACV,EAAK,MAAM,GAAQ,EACnB,EAAK,MAAQ,MAER,CACL,GAAI,GAAO,GAAI,GAAK,SACpB,EAAK,MAAQ,EAEb,EAAK,MAAM,GAAQ,EACnB,EAAO,GAIX,MAAO,IAaT,EAAK,SAAS,UAAU,QAAU,UAAY,CAQ5C,OAPI,GAAQ,GAER,EAAQ,CAAC,CACX,OAAQ,GACR,KAAM,OAGD,EAAM,QAAQ,CACnB,GAAI,GAAQ,EAAM,MACd,EAAQ,OAAO,KAAK,EAAM,KAAK,OAC/B,EAAM,EAAM,OAEhB,AAAI,EAAM,KAAK,OAKb,GAAM,OAAO,OAAO,GACpB,EAAM,KAAK,EAAM,SAGnB,OAAS,GAAI,EAAG,EAAI,EAAK,IAAK,CAC5B,GAAI,GAAO,EAAM,GAEjB,EAAM,KAAK,CACT,OAAQ,EAAM,OAAO,OAAO,GAC5B,KAAM,EAAM,KAAK,MAAM,MAK7B,MAAO,IAaT,EAAK,SAAS,UAAU,SAAW,UAAY,CAS7C,GAAI,KAAK,KACP,MAAO,MAAK,KAOd,OAJI,GAAM,KAAK,MAAQ,IAAM,IACzB,EAAS,OAAO,KAAK,KAAK,OAAO,OACjC,EAAM,EAAO,OAER,EAAI,EAAG,EAAI,EAAK,IAAK,CAC5B,GAAI,GAAQ,EAAO,GACf,EAAO,KAAK,MAAM,GAEtB,EAAM,EAAM,EAAQ,EAAK,GAG3B,MAAO,IAaT,EAAK,SAAS,UAAU,UAAY,SAAU,EAAG,CAU/C,OATI,GAAS,GAAI,GAAK,SAClB,EAAQ,OAER,EAAQ,CAAC,CACX,MAAO,EACP,OAAQ,EACR,KAAM,OAGD,EAAM,QAAQ,CACnB,EAAQ,EAAM,MAWd,OALI,GAAS,OAAO,KAAK,EAAM,MAAM,OACjC,EAAO,EAAO,OACd,EAAS,OAAO,KAAK,EAAM,KAAK,OAChC,EAAO,EAAO,OAET,EAAI,EAAG,EAAI,EAAM,IAGxB,OAFI,GAAQ,EAAO,GAEV,EAAI,EAAG,EAAI,EAAM,IAAK,CAC7B,GAAI,GAAQ,EAAO,GAEnB,GAAI,GAAS,GAAS,GAAS,IAAK,CAClC,GAAI,GAAO,EAAM,KAAK,MAAM,GACxB,EAAQ,EAAM,MAAM,MAAM,GAC1B,EAAQ,EAAK,OAAS,EAAM,MAC5B,EAAO,OAEX,AAAI,IAAS,GAAM,OAAO,MAIxB,GAAO,EAAM,OAAO,MAAM,GAC1B,EAAK,MAAQ,EAAK,OAAS,GAM3B,GAAO,GAAI,GAAK,SAChB,EAAK,MAAQ,EACb,EAAM,OAAO,MAAM,GAAS,GAG9B,EAAM,KAAK,CACT,MAAO,EACP,OAAQ,EACR,KAAM,MAOhB,MAAO,IAET,EAAK,SAAS,QAAU,UAAY,CAClC,KAAK,aAAe,GACpB,KAAK,KAAO,GAAI,GAAK,SACrB,KAAK,eAAiB,GACtB,KAAK,eAAiB,IAGxB,EAAK,SAAS,QAAQ,UAAU,OAAS,SAAU,EAAM,CACvD,GAAI,GACA,EAAe,EAEnB,GAAI,EAAO,K
AAK,aACd,KAAM,IAAI,OAAO,+BAGnB,OAAS,GAAI,EAAG,EAAI,EAAK,QAAU,EAAI,KAAK,aAAa,QACnD,EAAK,IAAM,KAAK,aAAa,GAD8B,IAE/D,IAGF,KAAK,SAAS,GAEd,AAAI,KAAK,eAAe,QAAU,EAChC,EAAO,KAAK,KAEZ,EAAO,KAAK,eAAe,KAAK,eAAe,OAAS,GAAG,MAG7D,OAAS,GAAI,EAAc,EAAI,EAAK,OAAQ,IAAK,CAC/C,GAAI,GAAW,GAAI,GAAK,SACpB,EAAO,EAAK,GAEhB,EAAK,MAAM,GAAQ,EAEnB,KAAK,eAAe,KAAK,CACvB,OAAQ,EACR,KAAM,EACN,MAAO,IAGT,EAAO,EAGT,EAAK,MAAQ,GACb,KAAK,aAAe,GAGtB,EAAK,SAAS,QAAQ,UAAU,OAAS,UAAY,CACnD,KAAK,SAAS,IAGhB,EAAK,SAAS,QAAQ,UAAU,SAAW,SAAU,EAAQ,CAC3D,OAAS,GAAI,KAAK,eAAe,OAAS,EAAG,GAAK,EAAQ,IAAK,CAC7D,GAAI,GAAO,KAAK,eAAe,GAC3B,EAAW,EAAK,MAAM,WAE1B,AAAI,IAAY,MAAK,eACnB,EAAK,OAAO,MAAM,EAAK,MAAQ,KAAK,eAAe,GAInD,GAAK,MAAM,KAAO,EAElB,KAAK,eAAe,GAAY,EAAK,OAGvC,KAAK,eAAe,QAGxB;AAAA;AAAA;AAAA,GAqBA,EAAK,MAAQ,SAAU,EAAO,CAC5B,KAAK,cAAgB,EAAM,cAC3B,KAAK,aAAe,EAAM,aAC1B,KAAK,SAAW,EAAM,SACtB,KAAK,OAAS,EAAM,OACpB,KAAK,SAAW,EAAM,UA0ExB,EAAK,MAAM,UAAU,OAAS,SAAU,EAAa,CACnD,MAAO,MAAK,MAAM,SAAU,EAAO,CACjC,GAAI,GAAS,GAAI,GAAK,YAAY,EAAa,GAC/C,EAAO,WA6BX,EAAK,MAAM,UAAU,MAAQ,SAAU,EAAI,CAoBzC,OAZI,GAAQ,GAAI,GAAK,MAAM,KAAK,QAC5B,EAAiB,OAAO,OAAO,MAC/B,EAAe,OAAO,OAAO,MAC7B,EAAiB,OAAO,OAAO,MAC/B,EAAkB,OAAO,OAAO,MAChC,EAAoB,OAAO,OAAO,MAO7B,EAAI,EAAG,EAAI,KAAK,OAAO,OAAQ,IACtC,EAAa,KAAK,OAAO,IAAM,GAAI,GAAK,OAG1C,EAAG,KAAK,EAAO,GAEf,OAAS,GAAI,EAAG,EAAI,EAAM,QAAQ,OAAQ,IAAK,CAS7C,GAAI,GAAS,EAAM,QAAQ,GACvB,EAAQ,KACR,EAAgB,EAAK,IAAI,MAE7B,AAAI,EAAO,YACT,EAAQ,KAAK,SAAS,UAAU,EAAO,KAAM,CAC3C,OAAQ,EAAO,SAGjB,EAAQ,CAAC,EAAO,MAGlB,OAAS,GAAI,EAAG,EAAI,EAAM,OAAQ,IAAK,CACrC,GAAI,GAAO,EAAM,GAQjB,EAAO,KAAO,EAOd,GAAI,GAAe,EAAK,SAAS,WAAW,GACxC,EAAgB,KAAK,SAAS,UAAU,GAAc,UAQ1D,GAAI,EAAc,SAAW,GAAK,EAAO,WAAa,EAAK,MAAM,SAAS,SAAU,CAClF,OAAS,GAAI,EAAG,EAAI,EAAO,OAAO,OAAQ,IAAK,CAC7C,GAAI,GAAQ,EAAO,OAAO,GAC1B,EAAgB,GAAS,EAAK,IAAI,MAGpC,MAGF,OAAS,GAAI,EAAG,EAAI,EAAc,OAAQ,IASxC,OAJI,GAAe,EAAc,GAC7B,EAAU,KAAK,cAAc,GAC7B,EAAY,EAAQ,OAEf,EAAI,EAAG,EAAI,EAAO,OAAO,OAAQ,IAAK,CAS7C,GAAI,GAAQ,EAAO,OAAO,GACtB,EAAe,EAAQ,GACvB,EAAuB,OAAO,KAAK,GACnC,EAAY,EAAe,IAAM,EACjC,EAAuB,GAAI,GAAK,IAA
I,GAoBxC,GAbI,EAAO,UAAY,EAAK,MAAM,SAAS,UACzC,GAAgB,EAAc,MAAM,GAEhC,EAAgB,KAAW,QAC7B,GAAgB,GAAS,EAAK,IAAI,WASlC,EAAO,UAAY,EAAK,MAAM,SAAS,WAAY,CACrD,AAAI,EAAkB,KAAW,QAC/B,GAAkB,GAAS,EAAK,IAAI,OAGtC,EAAkB,GAAS,EAAkB,GAAO,MAAM,GAO1D,SAgBF,GANA,EAAa,GAAO,OAAO,EAAW,EAAO,MAAO,SAAU,GAAG,GAAG,CAAE,MAAO,IAAI,KAM7E,GAAe,GAInB,QAAS,GAAI,EAAG,EAAI,EAAqB,OAAQ,IAAK,CAOpD,GAAI,GAAsB,EAAqB,GAC3C,EAAmB,GAAI,GAAK,SAAU,EAAqB,GAC3D,EAAW,EAAa,GACxB,EAEJ,AAAK,GAAa,EAAe,MAAuB,OACtD,EAAe,GAAoB,GAAI,GAAK,UAAW,EAAc,EAAO,GAE5E,EAAW,IAAI,EAAc,EAAO,GAKxC,EAAe,GAAa,KAWlC,GAAI,EAAO,WAAa,EAAK,MAAM,SAAS,SAC1C,OAAS,GAAI,EAAG,EAAI,EAAO,OAAO,OAAQ,IAAK,CAC7C,GAAI,GAAQ,EAAO,OAAO,GAC1B,EAAgB,GAAS,EAAgB,GAAO,UAAU,IAahE,OAHI,GAAqB,EAAK,IAAI,SAC9B,EAAuB,EAAK,IAAI,MAE3B,EAAI,EAAG,EAAI,KAAK,OAAO,OAAQ,IAAK,CAC3C,GAAI,GAAQ,KAAK,OAAO,GAExB,AAAI,EAAgB,IAClB,GAAqB,EAAmB,UAAU,EAAgB,KAGhE,EAAkB,IACpB,GAAuB,EAAqB,MAAM,EAAkB,KAIxE,GAAI,GAAoB,OAAO,KAAK,GAChC,EAAU,GACV,EAAU,OAAO,OAAO,MAY5B,GAAI,EAAM,YAAa,CACrB,EAAoB,OAAO,KAAK,KAAK,cAErC,OAAS,GAAI,EAAG,EAAI,EAAkB,OAAQ,IAAK,CACjD,GAAI,GAAmB,EAAkB,GACrC,EAAW,EAAK,SAAS,WAAW,GACxC,EAAe,GAAoB,GAAI,GAAK,WAIhD,OAAS,GAAI,EAAG,EAAI,EAAkB,OAAQ,IAAK,CASjD,GAAI,GAAW,EAAK,SAAS,WAAW,EAAkB,IACtD,EAAS,EAAS,OAEtB,GAAI,EAAC,EAAmB,SAAS,IAI7B,GAAqB,SAAS,GAIlC,IAAI,GAAc,KAAK,aAAa,GAChC,EAAQ,EAAa,EAAS,WAAW,WAAW,GACpD,EAEJ,GAAK,GAAW,EAAQ,MAAa,OACnC,EAAS,OAAS,EAClB,EAAS,UAAU,QAAQ,EAAe,QACrC,CACL,GAAI,GAAQ,CACV,IAAK,EACL,MAAO,EACP,UAAW,EAAe,IAE5B,EAAQ,GAAU,EAClB,EAAQ,KAAK,KAOjB,MAAO,GAAQ,KAAK,SAAU,GAAG,GAAG,CAClC,MAAO,IAAE,MAAQ,GAAE,SAYvB,EAAK,MAAM,UAAU,OAAS,UAAY,CACxC,GAAI,GAAgB,OAAO,KAAK,KAAK,eAClC,OACA,IAAI,SAAU,EAAM,CACnB,MAAO,CAAC,EAAM,KAAK,cAAc,KAChC,MAED,EAAe,OAAO,KAAK,KAAK,cACjC,IAAI,SAAU,EAAK,CAClB,MAAO,CAAC,EAAK,KAAK,aAAa,GAAK,WACnC,MAEL,MAAO,CACL,QAAS,EAAK,QACd,OAAQ,KAAK,OACb,aAAc,EACd,cAAe,EACf,SAAU,KAAK,SAAS,WAU5B,EAAK,MAAM,KAAO,SAAU,EAAiB,CAC3C,GAAI,GAAQ,GACR,EAAe,GACf,EAAoB,EAAgB,aACpC,EAAgB,OAAO,OAAO,MAC9B,EAA0B,EAAgB,cAC1C,EAAkB,GAAI,GAAK,SAAS,QACpC,EAAW,EAAK,SAAS,KAAK,EAAgB,UAEl
D,AAAI,EAAgB,SAAW,EAAK,SAClC,EAAK,MAAM,KAAK,4EAA8E,EAAK,QAAU,sCAAwC,EAAgB,QAAU,KAGjL,OAAS,GAAI,EAAG,EAAI,EAAkB,OAAQ,IAAK,CACjD,GAAI,GAAQ,EAAkB,GAC1B,EAAM,EAAM,GACZ,EAAW,EAAM,GAErB,EAAa,GAAO,GAAI,GAAK,OAAO,GAGtC,OAAS,GAAI,EAAG,EAAI,EAAwB,OAAQ,IAAK,CACvD,GAAI,GAAQ,EAAwB,GAChC,EAAO,EAAM,GACb,EAAU,EAAM,GAEpB,EAAgB,OAAO,GACvB,EAAc,GAAQ,EAGxB,SAAgB,SAEhB,EAAM,OAAS,EAAgB,OAE/B,EAAM,aAAe,EACrB,EAAM,cAAgB,EACtB,EAAM,SAAW,EAAgB,KACjC,EAAM,SAAW,EAEV,GAAI,GAAK,MAAM,IAExB;AAAA;AAAA;AAAA,GA6BA,EAAK,QAAU,UAAY,CACzB,KAAK,KAAO,KACZ,KAAK,QAAU,OAAO,OAAO,MAC7B,KAAK,WAAa,OAAO,OAAO,MAChC,KAAK,cAAgB,OAAO,OAAO,MACnC,KAAK,qBAAuB,GAC5B,KAAK,aAAe,GACpB,KAAK,UAAY,EAAK,UACtB,KAAK,SAAW,GAAI,GAAK,SACzB,KAAK,eAAiB,GAAI,GAAK,SAC/B,KAAK,cAAgB,EACrB,KAAK,GAAK,IACV,KAAK,IAAM,IACX,KAAK,UAAY,EACjB,KAAK,kBAAoB,IAe3B,EAAK,QAAQ,UAAU,IAAM,SAAU,EAAK,CAC1C,KAAK,KAAO,GAmCd,EAAK,QAAQ,UAAU,MAAQ,SAAU,EAAW,EAAY,CAC9D,GAAI,KAAK,KAAK,GACZ,KAAM,IAAI,YAAY,UAAY,EAAY,oCAGhD,KAAK,QAAQ,GAAa,GAAc,IAW1C,EAAK,QAAQ,UAAU,EAAI,SAAU,EAAQ,CAC3C,AAAI,EAAS,EACX,KAAK,GAAK,EACL,AAAI,EAAS,EAClB,KAAK,GAAK,EAEV,KAAK,GAAK,GAWd,EAAK,QAAQ,UAAU,GAAK,SAAU,EAAQ,CAC5C,KAAK,IAAM,GAoBb,EAAK,QAAQ,UAAU,IAAM,SAAU,EAAK,EAAY,CACtD,GAAI,GAAS,EAAI,KAAK,MAClB,EAAS,OAAO,KAAK,KAAK,SAE9B,KAAK,WAAW,GAAU,GAAc,GACxC,KAAK,eAAiB,EAEtB,OAAS,GAAI,EAAG,EAAI,EAAO,OAAQ,IAAK,CACtC,GAAI,GAAY,EAAO,GACnB,EAAY,KAAK,QAAQ,GAAW,UACpC,EAAQ,EAAY,EAAU,GAAO,EAAI,GACzC,EAAS,KAAK,UAAU,EAAO,CAC7B,OAAQ,CAAC,KAEX,EAAQ,KAAK,SAAS,IAAI,GAC1B,EAAW,GAAI,GAAK,SAAU,EAAQ,GACtC,EAAa,OAAO,OAAO,MAE/B,KAAK,qBAAqB,GAAY,EACtC,KAAK,aAAa,GAAY,EAG9B,KAAK,aAAa,IAAa,EAAM,OAGrC,OAAS,GAAI,EAAG,EAAI,EAAM,OAAQ,IAAK,CACrC,GAAI,GAAO,EAAM,GAUjB,GARI,EAAW,IAAS,MACtB,GAAW,GAAQ,GAGrB,EAAW,IAAS,EAIhB,KAAK,cAAc,IAAS,KAAW,CACzC,GAAI,GAAU,OAAO,OAAO,MAC5B,EAAQ,OAAY,KAAK,UACzB,KAAK,WAAa,EAElB,OAAS,GAAI,EAAG,EAAI,EAAO,OAAQ,IACjC,EAAQ,EAAO,IAAM,OAAO,OAAO,MAGrC,KAAK,cAAc,GAAQ,EAI7B,AAAI,KAAK,cAAc,GAAM,GAAW,IAAW,MACjD,MAAK,cAAc,GAAM,GAAW,GAAU,OAAO,OAAO,OAK9D,OAAS,GAAI,EAAG,EAAI,KAAK,kBAAkB,OAAQ,IAAK,CACtD,GAAI,GAAc,KAAK,
kBAAkB,GACrC,EAAW,EAAK,SAAS,GAE7B,AAAI,KAAK,cAAc,GAAM,GAAW,GAAQ,IAAgB,MAC9D,MAAK,cAAc,GAAM,GAAW,GAAQ,GAAe,IAG7D,KAAK,cAAc,GAAM,GAAW,GAAQ,GAAa,KAAK,OAYtE,EAAK,QAAQ,UAAU,6BAA+B,UAAY,CAOhE,OALI,GAAY,OAAO,KAAK,KAAK,cAC7B,EAAiB,EAAU,OAC3B,EAAc,GACd,EAAqB,GAEhB,EAAI,EAAG,EAAI,EAAgB,IAAK,CACvC,GAAI,GAAW,EAAK,SAAS,WAAW,EAAU,IAC9C,EAAQ,EAAS,UAErB,EAAmB,IAAW,GAAmB,GAAS,GAC1D,EAAmB,IAAU,EAE7B,EAAY,IAAW,GAAY,GAAS,GAC5C,EAAY,IAAU,KAAK,aAAa,GAK1C,OAFI,GAAS,OAAO,KAAK,KAAK,SAErB,EAAI,EAAG,EAAI,EAAO,OAAQ,IAAK,CACtC,GAAI,GAAY,EAAO,GACvB,EAAY,GAAa,EAAY,GAAa,EAAmB,GAGvE,KAAK,mBAAqB,GAQ5B,EAAK,QAAQ,UAAU,mBAAqB,UAAY,CAMtD,OALI,GAAe,GACf,EAAY,OAAO,KAAK,KAAK,sBAC7B,EAAkB,EAAU,OAC5B,EAAe,OAAO,OAAO,MAExB,EAAI,EAAG,EAAI,EAAiB,IAAK,CAaxC,OAZI,GAAW,EAAK,SAAS,WAAW,EAAU,IAC9C,EAAY,EAAS,UACrB,EAAc,KAAK,aAAa,GAChC,EAAc,GAAI,GAAK,OACvB,EAAkB,KAAK,qBAAqB,GAC5C,EAAQ,OAAO,KAAK,GACpB,EAAc,EAAM,OAGpB,EAAa,KAAK,QAAQ,GAAW,OAAS,EAC9C,EAAW,KAAK,WAAW,EAAS,QAAQ,OAAS,EAEhD,EAAI,EAAG,EAAI,EAAa,IAAK,CACpC,GAAI,GAAO,EAAM,GACb,EAAK,EAAgB,GACrB,EAAY,KAAK,cAAc,GAAM,OACrC,EAAK,EAAO,EAEhB,AAAI,EAAa,KAAU,OACzB,GAAM,EAAK,IAAI,KAAK,cAAc,GAAO,KAAK,eAC9C,EAAa,GAAQ,GAErB,EAAM,EAAa,GAGrB,EAAQ,EAAQ,OAAK,IAAM,GAAK,GAAO,MAAK,IAAO,GAAI,KAAK,GAAK,KAAK,GAAM,GAAc,KAAK,mBAAmB,KAAe,GACjI,GAAS,EACT,GAAS,EACT,EAAqB,KAAK,MAAM,EAAQ,KAAQ,IAQhD,EAAY,OAAO,EAAW,GAGhC,EAAa,GAAY,EAG3B,KAAK,aAAe,GAQtB,EAAK,QAAQ,UAAU,eAAiB,UAAY,CAClD,KAAK,SAAW,EAAK,SAAS,UAC5B,OAAO,KAAK,KAAK,eAAe,SAYpC,EAAK,QAAQ,UAAU,MAAQ,UAAY,CACzC,YAAK,+BACL,KAAK,qBACL,KAAK,iBAEE,GAAI,GAAK,MAAM,CACpB,cAAe,KAAK,cACpB,aAAc,KAAK,aACnB,SAAU,KAAK,SACf,OAAQ,OAAO,KAAK,KAAK,SACzB,SAAU,KAAK,kBAkBnB,EAAK,QAAQ,UAAU,IAAM,SAAU,EAAI,CACzC,GAAI,GAAO,MAAM,UAAU,MAAM,KAAK,UAAW,GACjD,EAAK,QAAQ,MACb,EAAG,MAAM,KAAM,IAcjB,EAAK,UAAY,SAAU,EAAM,EAAO,EAAU,CAShD,OARI,GAAiB,OAAO,OAAO,MAC/B,EAAe,OAAO,KAAK,GAAY,IAOlC,EAAI,EAAG,EAAI,EAAa,OAAQ,IAAK,CAC5C,GAAI,GAAM,EAAa,GACvB,EAAe,GAAO,EAAS,GAAK,QAGtC,KAAK,SAAW,OAAO,OAAO,MAE1B,IAAS,QACX,MAAK,SAAS,GAAQ,OAAO,OAAO,MACpC,KAAK,SAAS,GAAM,GAAS,IAajC,EAAK,UAAU,UAAU,QAA
U,SAAU,EAAgB,CAG3D,OAFI,GAAQ,OAAO,KAAK,EAAe,UAE9B,EAAI,EAAG,EAAI,EAAM,OAAQ,IAAK,CACrC,GAAI,GAAO,EAAM,GACb,EAAS,OAAO,KAAK,EAAe,SAAS,IAEjD,AAAI,KAAK,SAAS,IAAS,MACzB,MAAK,SAAS,GAAQ,OAAO,OAAO,OAGtC,OAAS,GAAI,EAAG,EAAI,EAAO,OAAQ,IAAK,CACtC,GAAI,GAAQ,EAAO,GACf,EAAO,OAAO,KAAK,EAAe,SAAS,GAAM,IAErD,AAAI,KAAK,SAAS,GAAM,IAAU,MAChC,MAAK,SAAS,GAAM,GAAS,OAAO,OAAO,OAG7C,OAAS,GAAI,EAAG,EAAI,EAAK,OAAQ,IAAK,CACpC,GAAI,GAAM,EAAK,GAEf,AAAI,KAAK,SAAS,GAAM,GAAO,IAAQ,KACrC,KAAK,SAAS,GAAM,GAAO,GAAO,EAAe,SAAS,GAAM,GAAO,GAEvE,KAAK,SAAS,GAAM,GAAO,GAAO,KAAK,SAAS,GAAM,GAAO,GAAK,OAAO,EAAe,SAAS,GAAM,GAAO,QAexH,EAAK,UAAU,UAAU,IAAM,SAAU,EAAM,EAAO,EAAU,CAC9D,GAAI,CAAE,KAAQ,MAAK,UAAW,CAC5B,KAAK,SAAS,GAAQ,OAAO,OAAO,MACpC,KAAK,SAAS,GAAM,GAAS,EAC7B,OAGF,GAAI,CAAE,KAAS,MAAK,SAAS,IAAQ,CACnC,KAAK,SAAS,GAAM,GAAS,EAC7B,OAKF,OAFI,GAAe,OAAO,KAAK,GAEtB,EAAI,EAAG,EAAI,EAAa,OAAQ,IAAK,CAC5C,GAAI,GAAM,EAAa,GAEvB,AAAI,IAAO,MAAK,SAAS,GAAM,GAC7B,KAAK,SAAS,GAAM,GAAO,GAAO,KAAK,SAAS,GAAM,GAAO,GAAK,OAAO,EAAS,IAElF,KAAK,SAAS,GAAM,GAAO,GAAO,EAAS,KAejD,EAAK,MAAQ,SAAU,EAAW,CAChC,KAAK,QAAU,GACf,KAAK,UAAY,GA2BnB,EAAK,MAAM,SAAW,GAAI,QAAQ,KAClC,EAAK,MAAM,SAAS,KAAO,EAC3B,EAAK,MAAM,SAAS,QAAU,EAC9B,EAAK,MAAM,SAAS,SAAW,EAa/B,EAAK,MAAM,SAAW,CAIpB,SAAU,EAMV,SAAU,EAMV,WAAY,GA0Bd,EAAK,MAAM,UAAU,OAAS,SAAU,EAAQ,CAC9C,MAAM,UAAY,IAChB,GAAO,OAAS,KAAK,WAGjB,SAAW,IACf,GAAO,MAAQ,GAGX,eAAiB,IACrB,GAAO,YAAc,IAGjB,YAAc,IAClB,GAAO,SAAW,EAAK,MAAM,SAAS,MAGnC,EAAO,SAAW,EAAK,MAAM,SAAS,SAAa,EAAO,KAAK,OAAO,IAAM,EAAK,MAAM,UAC1F,GAAO,KAAO,IAAM,EAAO,MAGxB,EAAO,SAAW,EAAK,MAAM,SAAS,UAAc,EAAO,KAAK,MAAM,KAAO,EAAK,MAAM,UAC3F,GAAO,KAAO,GAAK,EAAO,KAAO,KAG7B,YAAc,IAClB,GAAO,SAAW,EAAK,MAAM,SAAS,UAGxC,KAAK,QAAQ,KAAK,GAEX,MAUT,EAAK,MAAM,UAAU,UAAY,UAAY,CAC3C,OAAS,GAAI,EAAG,EAAI,KAAK,QAAQ,OAAQ,IACvC,GAAI,KAAK,QAAQ,GAAG,UAAY,EAAK,MAAM,SAAS,WAClD,MAAO,GAIX,MAAO,IA6BT,EAAK,MAAM,UAAU,KAAO,SAAU,EAAM,EAAS,CACnD,GAAI,MAAM,QAAQ,GAChB,SAAK,QAAQ,SAAU,EAAG,CAAE,KAAK,KAAK,EAAG,EAAK,MAAM,MAAM,KAAa,MAChE,KAGT,GAAI,GAAS,GAAW,GACxB,SAAO,KAAO,EAAK,WAEnB,KAAK,OAAO,GAEL,MAET,EAAK,gBAAkB,SA
AU,EAAS,EAAO,EAAK,CACpD,KAAK,KAAO,kBACZ,KAAK,QAAU,EACf,KAAK,MAAQ,EACb,KAAK,IAAM,GAGb,EAAK,gBAAgB,UAAY,GAAI,OACrC,EAAK,WAAa,SAAU,EAAK,CAC/B,KAAK,QAAU,GACf,KAAK,IAAM,EACX,KAAK,OAAS,EAAI,OAClB,KAAK,IAAM,EACX,KAAK,MAAQ,EACb,KAAK,oBAAsB,IAG7B,EAAK,WAAW,UAAU,IAAM,UAAY,CAG1C,OAFI,GAAQ,EAAK,WAAW,QAErB,GACL,EAAQ,EAAM,OAIlB,EAAK,WAAW,UAAU,YAAc,UAAY,CAKlD,OAJI,GAAY,GACZ,EAAa,KAAK,MAClB,EAAW,KAAK,IAEX,EAAI,EAAG,EAAI,KAAK,oBAAoB,OAAQ,IACnD,EAAW,KAAK,oBAAoB,GACpC,EAAU,KAAK,KAAK,IAAI,MAAM,EAAY,IAC1C,EAAa,EAAW,EAG1B,SAAU,KAAK,KAAK,IAAI,MAAM,EAAY,KAAK,MAC/C,KAAK,oBAAoB,OAAS,EAE3B,EAAU,KAAK,KAGxB,EAAK,WAAW,UAAU,KAAO,SAAU,EAAM,CAC/C,KAAK,QAAQ,KAAK,CAChB,KAAM,EACN,IAAK,KAAK,cACV,MAAO,KAAK,MACZ,IAAK,KAAK,MAGZ,KAAK,MAAQ,KAAK,KAGpB,EAAK,WAAW,UAAU,gBAAkB,UAAY,CACtD,KAAK,oBAAoB,KAAK,KAAK,IAAM,GACzC,KAAK,KAAO,GAGd,EAAK,WAAW,UAAU,KAAO,UAAY,CAC3C,GAAI,KAAK,KAAO,KAAK,OACnB,MAAO,GAAK,WAAW,IAGzB,GAAI,GAAO,KAAK,IAAI,OAAO,KAAK,KAChC,YAAK,KAAO,EACL,GAGT,EAAK,WAAW,UAAU,MAAQ,UAAY,CAC5C,MAAO,MAAK,IAAM,KAAK,OAGzB,EAAK,WAAW,UAAU,OAAS,UAAY,CAC7C,AAAI,KAAK,OAAS,KAAK,KACrB,MAAK,KAAO,GAGd,KAAK,MAAQ,KAAK,KAGpB,EAAK,WAAW,UAAU,OAAS,UAAY,CAC7C,KAAK,KAAO,GAGd,EAAK,WAAW,UAAU,eAAiB,UAAY,CACrD,GAAI,GAAM,EAEV,EACE,GAAO,KAAK,OACZ,EAAW,EAAK,WAAW,SACpB,EAAW,IAAM,EAAW,IAErC,AAAI,GAAQ,EAAK,WAAW,KAC1B,KAAK,UAIT,EAAK,WAAW,UAAU,KAAO,UAAY,CAC3C,MAAO,MAAK,IAAM,KAAK,QAGzB,EAAK,WAAW,IAAM,MACtB,EAAK,WAAW,MAAQ,QACxB,EAAK,WAAW,KAAO,OACvB,EAAK,WAAW,cAAgB,gBAChC,EAAK,WAAW,MAAQ,QACxB,EAAK,WAAW,SAAW,WAE3B,EAAK,WAAW,SAAW,SAAU,EAAO,CAC1C,SAAM,SACN,EAAM,KAAK,EAAK,WAAW,OAC3B,EAAM,SACC,EAAK,WAAW,SAGzB,EAAK,WAAW,QAAU,SAAU,EAAO,CAQzC,GAPI,EAAM,QAAU,GAClB,GAAM,SACN,EAAM,KAAK,EAAK,WAAW,OAG7B,EAAM,SAEF,EAAM,OACR,MAAO,GAAK,WAAW,SAI3B,EAAK,WAAW,gBAAkB,SAAU,EAAO,CACjD,SAAM,SACN,EAAM,iBACN,EAAM,KAAK,EAAK,WAAW,eACpB,EAAK,WAAW,SAGzB,EAAK,WAAW,SAAW,SAAU,EAAO,CAC1C,SAAM,SACN,EAAM,iBACN,EAAM,KAAK,EAAK,WAAW,OACpB,EAAK,WAAW,SAGzB,EAAK,WAAW,OAAS,SAAU,EAAO,CACxC,AAAI,EAAM,QAAU,GAClB,EAAM,KAAK,EAAK,WAAW,OAe/B,EAAK,WAAW,cAAgB,EAAK,UAAU,UAE/C,EAAK,WAAW,QAAU,SAA
U,EAAO,CACzC,OAAa,CACX,GAAI,GAAO,EAAM,OAEjB,GAAI,GAAQ,EAAK,WAAW,IAC1B,MAAO,GAAK,WAAW,OAIzB,GAAI,EAAK,WAAW,IAAM,GAAI,CAC5B,EAAM,kBACN,SAGF,GAAI,GAAQ,IACV,MAAO,GAAK,WAAW,SAGzB,GAAI,GAAQ,IACV,SAAM,SACF,EAAM,QAAU,GAClB,EAAM,KAAK,EAAK,WAAW,MAEtB,EAAK,WAAW,gBAGzB,GAAI,GAAQ,IACV,SAAM,SACF,EAAM,QAAU,GAClB,EAAM,KAAK,EAAK,WAAW,MAEtB,EAAK,WAAW,SAczB,GARI,GAAQ,KAAO,EAAM,UAAY,GAQjC,GAAQ,KAAO,EAAM,UAAY,EACnC,SAAM,KAAK,EAAK,WAAW,UACpB,EAAK,WAAW,QAGzB,GAAI,EAAK,MAAM,EAAK,WAAW,eAC7B,MAAO,GAAK,WAAW,UAK7B,EAAK,YAAc,SAAU,EAAK,EAAO,CACvC,KAAK,MAAQ,GAAI,GAAK,WAAY,GAClC,KAAK,MAAQ,EACb,KAAK,cAAgB,GACrB,KAAK,UAAY,GAGnB,EAAK,YAAY,UAAU,MAAQ,UAAY,CAC7C,KAAK,MAAM,MACX,KAAK,QAAU,KAAK,MAAM,QAI1B,OAFI,GAAQ,EAAK,YAAY,YAEtB,GACL,EAAQ,EAAM,MAGhB,MAAO,MAAK,OAGd,EAAK,YAAY,UAAU,WAAa,UAAY,CAClD,MAAO,MAAK,QAAQ,KAAK,YAG3B,EAAK,YAAY,UAAU,cAAgB,UAAY,CACrD,GAAI,GAAS,KAAK,aAClB,YAAK,WAAa,EACX,GAGT,EAAK,YAAY,UAAU,WAAa,UAAY,CAClD,GAAI,GAAkB,KAAK,cAC3B,KAAK,MAAM,OAAO,GAClB,KAAK,cAAgB,IAGvB,EAAK,YAAY,YAAc,SAAU,EAAQ,CAC/C,GAAI,GAAS,EAAO,aAEpB,GAAI,GAAU,KAId,OAAQ,EAAO,UACR,GAAK,WAAW,SACnB,MAAO,GAAK,YAAY,kBACrB,GAAK,WAAW,MACnB,MAAO,GAAK,YAAY,eACrB,GAAK,WAAW,KACnB,MAAO,GAAK,YAAY,kBAExB,GAAI,GAAe,4CAA8C,EAAO,KAExE,KAAI,GAAO,IAAI,QAAU,GACvB,IAAgB,gBAAkB,EAAO,IAAM,KAG3C,GAAI,GAAK,gBAAiB,EAAc,EAAO,MAAO,EAAO,OAIzE,EAAK,YAAY,cAAgB,SAAU,EAAQ,CACjD,GAAI,GAAS,EAAO,gBAEpB,GAAI,GAAU,KAId,QAAQ,EAAO,SACR,IACH,EAAO,cAAc,SAAW,EAAK,MAAM,SAAS,WACpD,UACG,IACH,EAAO,cAAc,SAAW,EAAK,MAAM,SAAS,SACpD,cAEA,GAAI,GAAe,kCAAoC,EAAO,IAAM,IACpE,KAAM,IAAI,GAAK,gBAAiB,EAAc,EAAO,MAAO,EAAO,KAGvE,GAAI,GAAa,EAAO,aAExB,GAAI,GAAc,KAAW,CAC3B,GAAI,GAAe,yCACnB,KAAM,IAAI,GAAK,gBAAiB,EAAc,EAAO,MAAO,EAAO,KAGrE,OAAQ,EAAW,UACZ,GAAK,WAAW,MACnB,MAAO,GAAK,YAAY,eACrB,GAAK,WAAW,KACnB,MAAO,GAAK,YAAY,kBAExB,GAAI,GAAe,mCAAqC,EAAW,KAAO,IAC1E,KAAM,IAAI,GAAK,gBAAiB,EAAc,EAAW,MAAO,EAAW,QAIjF,EAAK,YAAY,WAAa,SAAU,EAAQ,CAC9C,GAAI,GAAS,EAAO,gBAEpB,GAAI,GAAU,KAId,IAAI,EAAO,MAAM,UAAU,QAAQ,EAAO,MAAQ,GAAI,CACpD,GAAI,GAAiB,EAAO,MAAM,UAAU,IAAI,SAAU,EAAG,CAAE,MAAO,IAAM,EAAI,MAAO,KAAK,MA
CxF,EAAe,uBAAyB,EAAO,IAAM,uBAAyB,EAElF,KAAM,IAAI,GAAK,gBAAiB,EAAc,EAAO,MAAO,EAAO,KAGrE,EAAO,cAAc,OAAS,CAAC,EAAO,KAEtC,GAAI,GAAa,EAAO,aAExB,GAAI,GAAc,KAAW,CAC3B,GAAI,GAAe,gCACnB,KAAM,IAAI,GAAK,gBAAiB,EAAc,EAAO,MAAO,EAAO,KAGrE,OAAQ,EAAW,UACZ,GAAK,WAAW,KACnB,MAAO,GAAK,YAAY,kBAExB,GAAI,GAAe,0BAA4B,EAAW,KAAO,IACjE,KAAM,IAAI,GAAK,gBAAiB,EAAc,EAAW,MAAO,EAAW,QAIjF,EAAK,YAAY,UAAY,SAAU,EAAQ,CAC7C,GAAI,GAAS,EAAO,gBAEpB,GAAI,GAAU,KAId,GAAO,cAAc,KAAO,EAAO,IAAI,cAEnC,EAAO,IAAI,QAAQ,MAAQ,IAC7B,GAAO,cAAc,YAAc,IAGrC,GAAI,GAAa,EAAO,aAExB,GAAI,GAAc,KAAW,CAC3B,EAAO,aACP,OAGF,OAAQ,EAAW,UACZ,GAAK,WAAW,KACnB,SAAO,aACA,EAAK,YAAY,cACrB,GAAK,WAAW,MACnB,SAAO,aACA,EAAK,YAAY,eACrB,GAAK,WAAW,cACnB,MAAO,GAAK,YAAY,sBACrB,GAAK,WAAW,MACnB,MAAO,GAAK,YAAY,eACrB,GAAK,WAAW,SACnB,SAAO,aACA,EAAK,YAAY,sBAExB,GAAI,GAAe,2BAA6B,EAAW,KAAO,IAClE,KAAM,IAAI,GAAK,gBAAiB,EAAc,EAAW,MAAO,EAAW,QAIjF,EAAK,YAAY,kBAAoB,SAAU,EAAQ,CACrD,GAAI,GAAS,EAAO,gBAEpB,GAAI,GAAU,KAId,IAAI,GAAe,SAAS,EAAO,IAAK,IAExC,GAAI,MAAM,GAAe,CACvB,GAAI,GAAe,gCACnB,KAAM,IAAI,GAAK,gBAAiB,EAAc,EAAO,MAAO,EAAO,KAGrE,EAAO,cAAc,aAAe,EAEpC,GAAI,GAAa,EAAO,aAExB,GAAI,GAAc,KAAW,CAC3B,EAAO,aACP,OAGF,OAAQ,EAAW,UACZ,GAAK,WAAW,KACnB,SAAO,aACA,EAAK,YAAY,cACrB,GAAK,WAAW,MACnB,SAAO,aACA,EAAK,YAAY,eACrB,GAAK,WAAW,cACnB,MAAO,GAAK,YAAY,sBACrB,GAAK,WAAW,MACnB,MAAO,GAAK,YAAY,eACrB,GAAK,WAAW,SACnB,SAAO,aACA,EAAK,YAAY,sBAExB,GAAI,GAAe,2BAA6B,EAAW,KAAO,IAClE,KAAM,IAAI,GAAK,gBAAiB,EAAc,EAAW,MAAO,EAAW,QAIjF,EAAK,YAAY,WAAa,SAAU,EAAQ,CAC9C,GAAI,GAAS,EAAO,gBAEpB,GAAI,GAAU,KAId,IAAI,GAAQ,SAAS,EAAO,IAAK,IAEjC,GAAI,MAAM,GAAQ,CAChB,GAAI,GAAe,wBACnB,KAAM,IAAI,GAAK,gBAAiB,EAAc,EAAO,MAAO,EAAO,KAGrE,EAAO,cAAc,MAAQ,EAE7B,GAAI,GAAa,EAAO,aAExB,GAAI,GAAc,KAAW,CAC3B,EAAO,aACP,OAGF,OAAQ,EAAW,UACZ,GAAK,WAAW,KACnB,SAAO,aACA,EAAK,YAAY,cACrB,GAAK,WAAW,MACnB,SAAO,aACA,EAAK,YAAY,eACrB,GAAK,WAAW,cACnB,MAAO,GAAK,YAAY,sBACrB,GAAK,WAAW,MACnB,MAAO,GAAK,YAAY,eACrB,GAAK,WAAW,SACnB,SAAO,aACA,EAAK,YAAY,sBAExB,GAAI,GAAe,2BAA6B,EAAW,KAAO,IAClE,KAAM,IAAI,GAAK,gBAAiB,EAAc,EAAW,MAAO,EAAW,QAQ7E,SAAU,EAAM,EAAS,C
ACzB,AAAI,MAAO,SAAW,YAAc,OAAO,IAEzC,OAAO,GACF,AAAI,MAAO,IAAY,SAM5B,GAAO,QAAU,IAGjB,EAAK,KAAO,KAEd,KAAM,UAAY,CAMlB,MAAO,WCh5GX;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,GAeA,GAAI,IAAkB,UAOtB,GAAO,QAAU,GAUjB,YAAoB,EAAQ,CAC1B,GAAI,GAAM,GAAK,EACX,EAAQ,GAAgB,KAAK,GAEjC,GAAI,CAAC,EACH,MAAO,GAGT,GAAI,GACA,EAAO,GACP,EAAQ,EACR,EAAY,EAEhB,IAAK,EAAQ,EAAM,MAAO,EAAQ,EAAI,OAAQ,IAAS,CACrD,OAAQ,EAAI,WAAW,QAChB,IACH,EAAS,SACT,UACG,IACH,EAAS,QACT,UACG,IACH,EAAS,QACT,UACG,IACH,EAAS,OACT,UACG,IACH,EAAS,OACT,cAEA,SAGJ,AAAI,IAAc,GAChB,IAAQ,EAAI,UAAU,EAAW,IAGnC,EAAY,EAAQ,EACpB,GAAQ,EAGV,MAAO,KAAc,EACjB,EAAO,EAAI,UAAU,EAAW,GAChC,KCtDN,OAAiB,QCKjB,AAAK,OAAO,SACV,QAAO,QAAU,SAAU,EAAa,CACtC,GAAM,GAA2B,GACjC,OAAW,KAAO,QAAO,KAAK,GAE5B,EAAK,KAAK,CAAC,EAAK,EAAI,KAGtB,MAAO,KAIX,AAAK,OAAO,QACV,QAAO,OAAS,SAAU,EAAa,CACrC,GAAM,GAAiB,GACvB,OAAW,KAAO,QAAO,KAAK,GAE5B,EAAK,KAAK,EAAI,IAGhB,MAAO,KAMX,AAAI,MAAO,UAAY,aAGhB,SAAQ,UAAU,UACrB,SAAQ,UAAU,SAAW,SAC3B,EAA8B,EACxB,CACN,AAAI,MAAO,IAAM,SACf,MAAK,WAAa,EAAE,KACpB,KAAK,UAAY,EAAE,KAEnB,MAAK,WAAa,EAClB,KAAK,UAAY,KAKlB,QAAQ,UAAU,aACrB,SAAQ,UAAU,YAAc,YAC3B,EACG,CACN,GAAM,GAAS,KAAK,WACpB,GAAI,EAAQ,CACV,AAAI,EAAM,SAAW,GACnB,EAAO,YAAY,MAGrB,OAAS,GAAI,EAAM,OAAS,EAAG,GAAK,EAAG,IAAK,CAC1C,GAAI,GAAO,EAAM,GACjB,AAAI,MAAO,IAAS,SAClB,EAAO,SAAS,eAAe,GACxB,EAAK,YACZ,EAAK,WAAW,YAAY,GAG9B,AAAK,EAGH,EAAO,aAAa,KAAK,gBAAkB,GAF3C,EAAO,aAAa,EAAM,WCnEtC,OAAuB,OAiChB,YACL,EACmB,CACnB,GAAM,GAAY,GAAI,KAChB,EAAY,GAAI,KACtB,OAAW,KAAO,GAAM,CACtB,GAAM,CAAC,EAAM,GAAQ,EAAI,SAAS,MAAM,KAGlC,EAAW,EAAI,SACf,EAAW,EAAI,MAGf,EAAO,eAAW,EAAI,MACzB,QAAQ,mBAAoB,IAC5B,QAAQ,OAAQ,KAGnB,GAAI,EAAM,CACR,GAAM,GAAS,EAAU,IAAI,GAG7B,AAAK,EAAQ,IAAI,GASf,EAAU,IAAI,EAAU,CACtB,WACA,QACA,OACA,WAZF,GAAO,MAAQ,EAAI,MACnB,EAAO,KAAQ,EAGf,EAAQ,IAAI,QAcd,GAAU,IAAI,EAAU,CACtB,WACA,QACA,SAIN,MAAO,GCjFT,OAAuB,OAsChB,YACL,EAA2B,EACD,CAC1B,GAAM,GAAY,GAAI,QAAO,EAAO,UAAW,OACzC,EAAY,CAAC,EAAY,EAAc,IACpC,GAAG,4BAA+B,WAI3C,MAAO,AAAC,IAAkB,CACxB,EAAQ,EACL,QAAQ,gBAAiB,KACzB,OAGH,GAAM,GAAQ,GAAI,QAAO,MAAM,EAAO,cACpC,EACG,QAAQ,uBAAwB,QAChC,QAAQ,EAAW
,QACnB,OAGL,MAAO,IACL,GACI,eAAW,GACX,GAED,QAAQ,EAAO,GACf,QAAQ,8BAA+B,OCpCzC,YACL,EACqB,CACrB,GAAM,GAAS,GAAK,MAAa,MAAM,CAAC,QAAS,SAIjD,MAHe,IAAK,MAAa,YAAY,EAAO,GAG7C,QACA,EAAM,QAWR,YACL,EAA4B,EACV,CAzEpB,MA0EE,GAAM,GAAU,GAAI,KAAuB,GAGrC,EAA2B,GACjC,OAAS,GAAI,EAAG,EAAI,EAAM,OAAQ,IAChC,OAAW,KAAU,GACnB,AAAI,EAAM,GAAG,WAAW,EAAO,OAC7B,GAAO,EAAO,MAAQ,GACtB,EAAQ,OAAO,IAIrB,OAAW,KAAU,GACnB,AAAI,SAAK,iBAAL,wBAAsB,EAAO,QAC/B,GAAO,EAAO,MAAQ,IAG1B,MAAO,GC0BT,YAAoB,EAAa,EAAuB,CACtD,GAAM,CAAC,EAAG,GAAK,CAAC,GAAI,KAAI,GAAI,GAAI,KAAI,IACpC,MAAO,CACL,GAAG,GAAI,KAAI,CAAC,GAAG,GAAG,OAAO,GAAS,CAAC,EAAE,IAAI,MAWtC,WAAa,CAgCX,YAAY,CAAE,SAAQ,OAAM,WAAwB,CACzD,KAAK,QAAU,EAGf,KAAK,UAAY,GAAuB,GACxC,KAAK,UAAY,GAAuB,EAAQ,IAGhD,KAAK,UAAU,UAAY,GAAI,QAAO,EAAO,WAG7C,KAAK,MAAQ,KAAK,UAAY,CAG5B,AAAI,EAAO,KAAK,SAAW,GAAK,EAAO,KAAK,KAAO,KACjD,KAAK,IAAK,KAAa,EAAO,KAAK,KAC1B,EAAO,KAAK,OAAS,GAC9B,KAAK,IAAK,KAAa,cAAc,GAAG,EAAO,OAIjD,GAAM,GAAM,GAAW,CACrB,UAAW,iBAAkB,WAC5B,EAAQ,UAGX,OAAW,KAAQ,GAAO,KAAK,IAAI,GACjC,IAAa,KAAO,KAAQ,KAAa,IAEzC,OAAW,KAAM,GACf,KAAK,SAAS,OAAO,EAAK,IAC1B,KAAK,eAAe,OAAO,EAAK,IAKpC,KAAK,IAAI,YAGT,KAAK,MAAM,QAAS,CAAE,MAAO,MAC7B,KAAK,MAAM,QAGX,OAAW,KAAO,GAChB,KAAK,IAAI,KAoBR,OAAO,EAA6B,CACzC,GAAI,EACF,GAAI,CACF,GAAM,GAAY,KAAK,UAAU,GAG3B,EAAU,GAAiB,GAC9B,OAAO,GACN,EAAO,WAAa,KAAK,MAAM,SAAS,YAItC,EAAS,KAAK,MAAM,OAAO,GAAG,MAGjC,OAAyB,CAAC,EAAM,CAAE,MAAK,QAAO,eAAgB,CAC7D,GAAM,GAAW,KAAK,UAAU,IAAI,GACpC,GAAI,MAAO,IAAa,YAAa,CACnC,GAAM,CAAE,WAAU,QAAO,OAAM,UAAW,EAGpC,EAAQ,GACZ,EACA,OAAO,KAAK,EAAU,WAIlB,EAAQ,CAAC,CAAC,EAAS,EAAC,OAAO,OAAO,GAAO,MAAM,GAAK,GAC1D,EAAK,KAAK,CACR,WACA,MAAO,EAAU,GACjB,KAAO,EAAU,GACjB,MAAO,EAAS,GAAI,GACpB,UAGJ,MAAO,IACN,IAGF,KAAK,CAAC,EAAG,IAAM,EAAE,MAAQ,EAAE,OAG3B,OAAO,CAAC,EAAO,IAAW,CACzB,GAAM,GAAW,KAAK,UAAU,IAAI,EAAO,UAC3C,GAAI,MAAO,IAAa,YAAa,CACnC,GAAM,GAAM,UAAY,GACpB,EAAS,OAAQ,SACjB,EAAS,SACb,EAAM,IAAI,EAAK,CAAC,GAAG,EAAM,IAAI,IAAQ,GAAI,IAE3C,MAAO,IACN,GAAI,MAGL,EACJ,GAAI,KAAK,QAAQ,YAAa,CAC5B,GAAM,GAAS,KAAK,MAAM,MAAM,GAAW,CACzC,OAAW,KAAU,GACnB,EAAQ,KAAK,EAAO,KAAM,CACxB,OA
AQ,CAAC,SACT,SAAU,KAAK,MAAM,SAAS,SAC9B,SAAU,KAAK,MAAM,SAAS,aAKpC,EAAc,EAAO,OACjB,OAAO,KAAK,EAAO,GAAG,UAAU,UAChC,GAIN,MAAO,IACL,MAAO,CAAC,GAAG,EAAO,WACf,MAAO,IAAgB,aAAe,CAAE,sBAIvC,EAAN,CACA,QAAQ,KAAK,kBAAkB,uCAKnC,MAAO,CAAE,MAAO,ML/PpB,GAAI,GAqBJ,YACE,EACe,gCACf,GAAI,GAAO,UAGX,GAAI,MAAO,SAAW,aAAe,gBAAkB,QAAQ,CAC7D,GAAM,GAAS,SAAS,cAAiC,eACnD,CAAC,GAAQ,EAAO,IAAI,MAAM,WAGhC,EAAO,EAAK,QAAQ,KAAM,GAI5B,GAAM,GAAU,GAChB,OAAW,KAAQ,GAAO,KAAM,CAC9B,OAAQ,OAGD,KACH,EAAQ,KAAK,GAAG,gBAChB,UAGG,SACA,KACH,EAAQ,KAAK,GAAG,gBAChB,MAIJ,AAAI,IAAS,MACX,EAAQ,KAAK,GAAG,cAAiB,YAIrC,AAAI,EAAO,KAAK,OAAS,GACvB,EAAQ,KAAK,GAAG,2BAGd,EAAQ,QACV,MAAM,eACJ,GAAG,oCACH,GAAG,MAeT,YACE,EACwB,gCACxB,OAAQ,EAAQ,UAGT,GACH,YAAM,IAAqB,EAAQ,KAAK,QACxC,EAAQ,GAAI,GAAO,EAAQ,MACpB,CACL,KAAM,OAIL,GACH,MAAO,CACL,KAAM,EACN,KAAM,EAAQ,EAAM,OAAO,EAAQ,MAAQ,CAAE,MAAO,aAKtD,KAAM,IAAI,WAAU,2BAS1B,KAAK,KAAO,WAGZ,iBAAiB,UAAW,AAAM,GAAM,0BACtC,YAAY,KAAM,IAAQ,EAAG", - "names": [] -} diff --git a/assets/stylesheets/palette.e6a45f82.min.css b/assets/stylesheets/palette.e6a45f82.min.css deleted file mode 100644 index 9d16769c2a22..000000000000 --- a/assets/stylesheets/palette.e6a45f82.min.css +++ /dev/null @@ -1 +0,0 @@ 
-[data-md-color-accent=red]{--md-accent-fg-color:#ff1947;--md-accent-fg-color--transparent:rgba(255,25,71,.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-accent=pink]{--md-accent-fg-color:#f50056;--md-accent-fg-color--transparent:rgba(245,0,86,.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-accent=purple]{--md-accent-fg-color:#df41fb;--md-accent-fg-color--transparent:rgba(223,65,251,.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-accent=deep-purple]{--md-accent-fg-color:#7c4dff;--md-accent-fg-color--transparent:rgba(124,77,255,.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-accent=indigo]{--md-accent-fg-color:#526cfe;--md-accent-fg-color--transparent:rgba(82,108,254,.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-accent=blue]{--md-accent-fg-color:#4287ff;--md-accent-fg-color--transparent:rgba(66,135,255,.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-accent=light-blue]{--md-accent-fg-color:#0091eb;--md-accent-fg-color--transparent:rgba(0,145,235,.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-accent=cyan]{--md-accent-fg-color:#00bad6;--md-accent-fg-color--transparent:rgba(0,186,214,.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-accent=teal]{--md-accent-fg-color:#00bda4;--md-accent-fg-color--transparent:rgba(0,189,164,.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-accent=green]{--md-accent-fg-color:#00c753;--md-accent-fg-color--transparent:rgba(0,199,83,.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-accent=light-green]{--md-accent-fg-color:#63de17;--md-accent-fg-color--transparent:rgba(99,222,23,.1);--md-accent-bg-color:#fff;--md-ac
cent-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-accent=lime]{--md-accent-fg-color:#b0eb00;--md-accent-fg-color--transparent:rgba(176,235,0,.1);--md-accent-bg-color:rgba(0,0,0,.87);--md-accent-bg-color--light:rgba(0,0,0,.54)}[data-md-color-accent=yellow]{--md-accent-fg-color:#ffd500;--md-accent-fg-color--transparent:rgba(255,213,0,.1);--md-accent-bg-color:rgba(0,0,0,.87);--md-accent-bg-color--light:rgba(0,0,0,.54)}[data-md-color-accent=amber]{--md-accent-fg-color:#fa0;--md-accent-fg-color--transparent:rgba(255,170,0,.1);--md-accent-bg-color:rgba(0,0,0,.87);--md-accent-bg-color--light:rgba(0,0,0,.54)}[data-md-color-accent=orange]{--md-accent-fg-color:#ff9100;--md-accent-fg-color--transparent:rgba(255,145,0,.1);--md-accent-bg-color:rgba(0,0,0,.87);--md-accent-bg-color--light:rgba(0,0,0,.54)}[data-md-color-accent=deep-orange]{--md-accent-fg-color:#ff6e42;--md-accent-fg-color--transparent:rgba(255,110,66,.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-primary=red]{--md-primary-fg-color:#ef5552;--md-primary-fg-color--light:#e57171;--md-primary-fg-color--dark:#e53734;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-primary=pink]{--md-primary-fg-color:#e92063;--md-primary-fg-color--light:#ec417a;--md-primary-fg-color--dark:#c3185d;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-primary=purple]{--md-primary-fg-color:#ab47bd;--md-primary-fg-color--light:#bb69c9;--md-primary-fg-color--dark:#8c24a8;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-primary=deep-purple]{--md-primary-fg-color:#7e56c2;--md-primary-fg-color--light:#9574cd;--md-primary-fg-color--dark:#673ab6;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-primary=indigo]{--md-primary-fg-color:#4051b5;--md-primary-fg-color--light:#5d6cc0;--md-primary-fg-color--dark:#303fa1;--md-primary-bg-color:#fff;--md-p
rimary-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-primary=blue]{--md-primary-fg-color:#2094f3;--md-primary-fg-color--light:#42a5f5;--md-primary-fg-color--dark:#1975d2;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-primary=light-blue]{--md-primary-fg-color:#02a6f2;--md-primary-fg-color--light:#28b5f6;--md-primary-fg-color--dark:#0287cf;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-primary=cyan]{--md-primary-fg-color:#00bdd6;--md-primary-fg-color--light:#25c5da;--md-primary-fg-color--dark:#0097a8;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-primary=teal]{--md-primary-fg-color:#009485;--md-primary-fg-color--light:#26a699;--md-primary-fg-color--dark:#007a6c;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-primary=green]{--md-primary-fg-color:#4cae4f;--md-primary-fg-color--light:#68bb6c;--md-primary-fg-color--dark:#398e3d;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-primary=light-green]{--md-primary-fg-color:#8bc34b;--md-primary-fg-color--light:#9ccc66;--md-primary-fg-color--dark:#689f38;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-primary=lime]{--md-primary-fg-color:#cbdc38;--md-primary-fg-color--light:#d3e156;--md-primary-fg-color--dark:#b0b52c;--md-primary-bg-color:rgba(0,0,0,.87);--md-primary-bg-color--light:rgba(0,0,0,.54)}[data-md-color-primary=yellow]{--md-primary-fg-color:#ffec3d;--md-primary-fg-color--light:#ffee57;--md-primary-fg-color--dark:#fbc02d;--md-primary-bg-color:rgba(0,0,0,.87);--md-primary-bg-color--light:rgba(0,0,0,.54)}[data-md-color-primary=amber]{--md-primary-fg-color:#ffc105;--md-primary-fg-color--light:#ffc929;--md-primary-fg-color--dark:#ffa200;--md-primary-bg-color:rgba(0,0,0,.87);--md-primary-bg-color--light:rgba(0,0,0,.54)}[data-md-color-primary=orange]{--md-primary-fg-color:#
ffa724;--md-primary-fg-color--light:#ffa724;--md-primary-fg-color--dark:#fa8900;--md-primary-bg-color:rgba(0,0,0,.87);--md-primary-bg-color--light:rgba(0,0,0,.54)}[data-md-color-primary=deep-orange]{--md-primary-fg-color:#ff6e42;--md-primary-fg-color--light:#ff8a66;--md-primary-fg-color--dark:#f4511f;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-primary=brown]{--md-primary-fg-color:#795649;--md-primary-fg-color--light:#8d6e62;--md-primary-fg-color--dark:#5d4037;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-primary=grey]{--md-primary-fg-color:#757575;--md-primary-fg-color--light:#9e9e9e;--md-primary-fg-color--dark:#616161;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-primary=blue-grey]{--md-primary-fg-color:#546d78;--md-primary-fg-color--light:#607c8a;--md-primary-fg-color--dark:#455a63;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,.7)}[data-md-color-primary=white]{--md-primary-fg-color:#fff;--md-primary-fg-color--light:hsla(0,0%,100%,.7);--md-primary-fg-color--dark:rgba(0,0,0,.07);--md-primary-bg-color:rgba(0,0,0,.87);--md-primary-bg-color--light:rgba(0,0,0,.54);--md-typeset-a-color:#4051b5}@media screen and (min-width:60em){[data-md-color-primary=white] .md-search__form{background-color:rgba(0,0,0,.07)}[data-md-color-primary=white] .md-search__form:hover{background-color:rgba(0,0,0,.32)}[data-md-color-primary=white] .md-search__input+.md-search__icon{color:rgba(0,0,0,.87)}}@media screen and (min-width:76.25em){[data-md-color-primary=white] .md-tabs{border-bottom:.05rem solid rgba(0,0,0,.07)}}[data-md-color-primary=black]{--md-primary-fg-color:#000;--md-primary-fg-color--light:rgba(0,0,0,.54);--md-primary-fg-color--dark:#000;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,.7);--md-typeset-a-color:#4051b5}[data-md-color-primary=black] .md-header{background-color:#000}@media screen and 
(max-width:59.9375em){[data-md-color-primary=black] .md-nav__source{background-color:rgba(0,0,0,.87)}}@media screen and (min-width:60em){[data-md-color-primary=black] .md-search__form{background-color:hsla(0,0%,100%,.12)}[data-md-color-primary=black] .md-search__form:hover{background-color:hsla(0,0%,100%,.3)}}@media screen and (max-width:76.1875em){html [data-md-color-primary=black] .md-nav--primary .md-nav__title[for=__drawer]{background-color:#000}}@media screen and (min-width:76.25em){[data-md-color-primary=black] .md-tabs{background-color:#000}}@media screen{[data-md-color-scheme=slate]{--md-hue:232;--md-default-fg-color:hsla(var(--md-hue),75%,95%,1);--md-default-fg-color--light:hsla(var(--md-hue),75%,90%,0.62);--md-default-fg-color--lighter:hsla(var(--md-hue),75%,90%,0.32);--md-default-fg-color--lightest:hsla(var(--md-hue),75%,90%,0.12);--md-default-bg-color:hsla(var(--md-hue),15%,21%,1);--md-default-bg-color--light:hsla(var(--md-hue),15%,21%,0.54);--md-default-bg-color--lighter:hsla(var(--md-hue),15%,21%,0.26);--md-default-bg-color--lightest:hsla(var(--md-hue),15%,21%,0.07);--md-code-fg-color:hsla(var(--md-hue),18%,86%,1);--md-code-bg-color:hsla(var(--md-hue),15%,15%,1);--md-code-hl-color:rgba(66,135,255,.15);--md-code-hl-number-color:#e6695b;--md-code-hl-special-color:#f06090;--md-code-hl-function-color:#c973d9;--md-code-hl-constant-color:#9383e2;--md-code-hl-keyword-color:#6791e0;--md-code-hl-string-color:#2fb170;--md-code-hl-name-color:var(--md-code-fg-color);--md-code-hl-operator-color:var(--md-default-fg-color--light);--md-code-hl-punctuation-color:var(--md-default-fg-color--light);--md-code-hl-comment-color:var(--md-default-fg-color--light);--md-code-hl-generic-color:var(--md-default-fg-color--light);--md-code-hl-variable-color:var(--md-default-fg-color--light);--md-typeset-color:var(--md-default-fg-color);--md-typeset-a-color:var(--md-primary-fg-color);--md-typeset-mark-color:rgba(66,135,255,.3);--md-typeset-kbd-color:hsla(var(--md-hue),15%,94%,0.12);--
md-typeset-kbd-accent-color:hsla(var(--md-hue),15%,94%,0.2);--md-typeset-kbd-border-color:hsla(var(--md-hue),15%,14%,1);--md-typeset-table-color:hsla(var(--md-hue),75%,95%,0.12);--md-admonition-bg-color:hsla(var(--md-hue),0%,100%,0.025);--md-footer-bg-color:hsla(var(--md-hue),15%,12%,0.87);--md-footer-bg-color--dark:hsla(var(--md-hue),15%,10%,1)}[data-md-color-scheme=slate][data-md-color-primary=black],[data-md-color-scheme=slate][data-md-color-primary=white]{--md-typeset-a-color:#5d6cc0}[data-md-color-scheme=slate] img[src$="#only-light"]{display:none}[data-md-color-scheme=slate] img[src$="#only-dark"]{display:initial}} \ No newline at end of file diff --git a/assets/stylesheets/palette.e6a45f82.min.css.map b/assets/stylesheets/palette.e6a45f82.min.css.map deleted file mode 100644 index b33c518da16f..000000000000 --- a/assets/stylesheets/palette.e6a45f82.min.css.map +++ /dev/null @@ -1 +0,0 @@ -{"version":3,"sources":["src/assets/stylesheets/palette/_accent.scss","../../../src/assets/stylesheets/palette.scss","src/assets/stylesheets/palette/_primary.scss","src/assets/stylesheets/utilities/_break.scss","src/assets/stylesheets/palette/_scheme.scss"],"names":[],"mappings":"AA8CE,2BACE,4BAAA,CACA,oDAAA,CAOE,yBAAA,CACA,8CCnDN,CDyCE,4BACE,4BAAA,CACA,mDAAA,CAOE,yBAAA,CACA,8CC5CN,CDkCE,8BACE,4BAAA,CACA,qDAAA,CAOE,yBAAA,CACA,8CCrCN,CD2BE,mCACE,4BAAA,CACA,qDAAA,CAOE,yBAAA,CACA,8CC9BN,CDoBE,8BACE,4BAAA,CACA,qDAAA,CAOE,yBAAA,CACA,8CCvBN,CDaE,4BACE,4BAAA,CACA,qDAAA,CAOE,yBAAA,CACA,8CChBN,CDME,kCACE,4BAAA,CACA,oDAAA,CAOE,yBAAA,CACA,8CCTN,CDDE,4BACE,4BAAA,CACA,oDAAA,CAOE,yBAAA,CACA,8CCFN,CDRE,4BACE,4BAAA,CACA,oDAAA,CAOE,yBAAA,CACA,8CCKN,CDfE,6BACE,4BAAA,CACA,mDAAA,CAOE,yBAAA,CACA,8CCYN,CDtBE,mCACE,4BAAA,CACA,oDAAA,CAOE,yBAAA,CACA,8CCmBN,CD7BE,4BACE,4BAAA,CACA,oDAAA,CAIE,oCAAA,CACA,2CC6BN,CDpCE,8BACE,4BAAA,CACA,oDAAA,CAIE,oCAAA,CACA,2CCoCN,CD3CE,6BACE,yBAAA,CACA,oDAAA,CAIE,oCAAA,CACA,2CC2CN,CDlDE,8BACE,4BAAA,CACA,oDAAA,CAIE,oCAAA,CACA,2CCkDN,CDzDE,mCACE,4BAAA,CACA,qDAAA,CAOE,yBAAA
,CACA,8CCsDN,CC3DE,4BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,+CDwDN,CCnEE,6BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,+CDgEN,CC3EE,+BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,+CDwEN,CCnFE,oCACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,+CDgFN,CC3FE,+BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,+CDwFN,CCnGE,6BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,+CDgGN,CC3GE,mCACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,+CDwGN,CCnHE,6BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,+CDgHN,CC3HE,6BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,+CDwHN,CCnIE,8BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,+CDgIN,CC3IE,oCACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,+CDwIN,CCnJE,6BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAIE,qCAAA,CACA,4CDmJN,CC3JE,+BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAIE,qCAAA,CACA,4CD2JN,CCnKE,8BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAIE,qCAAA,CACA,4CDmKN,CC3KE,+BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAIE,qCAAA,CACA,4CD2KN,CCnLE,oCACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,+CDgLN,CC3LE,8BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,+CDwLN,CCnME,6BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,+CDgMN,CC3ME,kCACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,+CDwMN,CC9LA,8BACE,0BAAA,CACA,+CAAA,CACA,2CAAA,CACA,qCAAA,CACA,4CAAA,CAGA,4BD+LF,CE9EI,mCD3GA,+CACE,gCD4LJ,CCzLI,qDACE,gCD2LN,CCtLE,iEACE,qBDwLJ,CACF,CEzFI,sCDxFA,uCACE,0CDoLJ,CACF,CC3KA,8BACE,0BAAA,CACA,4CAAA,CACA,gCAAA,CACA,0BAAA,CACA,+CAAA,CAGA,4BD4KF,CCzKE,yCACE,qBD2KJ,CEvFI,wCD7EA,8CACE,gCDuKJ,CACF,CE/GI,mCDjDA,+CACE,oCDmKJ,CChKI,qDACE,mCDkKN,CACF,CEpGI,wCDtDA,iFACE,qBD6JJ,CACF,CE5HI,sCD1BA,uCACE,qBDyJJ,CACF,CGvSA,cAGE,6BAKE,YAAA,CAGA,mDAAA,CACA,6DAAA,CACA,+DAAA,CACA,gEAAA,CACA,mDAAA,CACA,6DAAA,CACA,+DAAA,CACA,gEAAA,CAGA,gDAAA,CACA,gDAAA,CAGA,uCAAA,CACA,iCAAA,CACA,kCAAA,CACA,mCAAA,CACA,mCAAA,CACA,kCAAA,CACA,iCAAA,CACA,+CAAA,CACA,6DAAA,CACA,gEAAA,CACA,4DAAA,CACA,4DAAA,CACA,6DAAA,CAGA,6CAAA,CAGA,+CAAA,CAGA,2CAAA,CAGA,uDAAA,CACA,6DAAA,CACA,2DAAA,CAGA,yDAAA,CAGA,0DAAA,CAGA,qDAAA,CACA,w
DHgRF,CG7QE,oHAIE,4BH4QJ,CGxQE,qDACE,YH0QJ,CGtQE,oDACE,eHwQJ,CACF","file":"palette.css"} \ No newline at end of file diff --git a/async-pattern/index.html b/async-pattern/index.html index b3e8d19bb644..86162b18e7a0 100644 --- a/async-pattern/index.html +++ b/async-pattern/index.html @@ -1,4070 +1,68 @@ - - - - - - - - - - - - - Asynchronous Job Pattern - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Asynchronous Job Pattern - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - -
    -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Asynchronous Job Pattern

    -

    Introduction

    -

    If triggering an external job (e.g. an Amazon EMR job) from Argo that does not run to completion in a container, there are two options:

    -
      -
    • create a container that polls the external job completion status
    • -
    • combine a trigger step that starts the job with a suspend step that is resumed by an API call to Argo when the external job is complete.
    • -
    -

    This document describes the second option in more detail.

    -

    The pattern

    -

    The pattern involves two steps - the first step is a short-running step that triggers a long-running job outside Argo (e.g. an HTTP submission), and the second step is a suspend step that suspends workflow execution and is ultimately either resumed or stopped (i.e. failed) via a call to the Argo API when the job outside Argo succeeds or fails.

    -

    When implemented as a WorkflowTemplate it can look something like this:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: WorkflowTemplate
    -metadata:
    -  name: external-job-template
    -spec:
    -  entrypoint: run-external-job
    -  arguments:
    -    parameters:
    -      - name: "job-cmd"
    -  templates:
    -    - name: run-external-job
    -      inputs:
    -        parameters:
    -          - name: "job-cmd"
    -            value: "{{workflow.parameters.job-cmd}}"
    -      steps:
    -        - - name: trigger-job
    -            template: trigger-job
    -            arguments:
    -              parameters:
    -                - name: "job-cmd"
    -                  value: "{{inputs.parameters.job-cmd}}"
    -        - - name: wait-completion
    -            template: wait-completion
    -            arguments:
    -              parameters:
    -                - name: uuid
    -                  value: "{{steps.trigger-job.outputs.result}}"
    -
    -    - name: trigger-job
    -      inputs:
    -        parameters:
    -          - name: "job-cmd"
    -      container:
    -        image: appropriate/curl:latest
    -        command: [ "/bin/sh", "-c" ]
    -        args: [ "{{inputs.parameters.job-cmd}}" ]
    -
    -    - name: wait-completion
    -      inputs:
    -        parameters:
    -          - name: uuid
    -      suspend: { }
    -
    -

    In this case the job-cmd parameter can be a command that makes an HTTP call via curl to an endpoint that returns a job UUID. More sophisticated submission and parsing of submission output could be done with something like a Python script step.

    -

    On job completion the external job would need to call either resume if successful:

    -

    You may need an access token.

    -
    curl --request PUT \
    -  --url https://localhost:2746/api/v1/workflows/<NAMESPACE>/<WORKFLOWNAME>/resume
    -  --header 'content-type: application/json' \
    -  --header "Authorization: $ARGO_TOKEN" \
    -  --data '{
    -      "namespace": "<NAMESPACE>",
    -      "name": "<WORKFLOWNAME>",
    -      "nodeFieldSelector": "inputs.parameters.uuid.value=<UUID>"
    -    }'
    -
    -

    or stop if unsuccessful:

    -
    curl --request PUT \
    -  --url https://localhost:2746/api/v1/workflows/<NAMESPACE>/<WORKFLOWNAME>/stop
    -  --header 'content-type: application/json' \
    -  --header "Authorization: $ARGO_TOKEN" \
    -  --data '{
    -      "namespace": "<NAMESPACE>",
    -      "name": "<WORKFLOWNAME>",
    -      "nodeFieldSelector": "inputs.parameters.uuid.value=<UUID>",
    -      "message": "<FAILURE-MESSAGE>"
    -    }'
    -
    -

    Retrying failed jobs

    -

    Using argo retry on failed jobs that follow this pattern will cause Argo to re-attempt the suspend step without re-triggering the job.

    -

    Instead you need to use the --restart-successful option, e.g. if using the template from above:

    -
    argo retry <WORKFLOWNAME> --restart-successful --node-field-selector templateRef.template=run-external-job,phase=Failed
    -
    - - - - -

    Comments

    - - +

    Asynchronous Job Pattern - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo/index.html b/cli/argo/index.html index a5d58ad09ec5..f51814284b5e 100644 --- a/cli/argo/index.html +++ b/cli/argo/index.html @@ -1,4172 +1,68 @@ - - - - - - - - - - - - - argo - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - -
    -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo

    - -

    argo

    -

    argo is the command line interface to Argo

    -

    Synopsis

    -

    You can use the CLI in the following modes:

    -

    Kubernetes API Mode (default)

    -

    Requests are sent directly to the Kubernetes API. No Argo Server is needed. Large workflows and the workflow archive are not supported.

    -

    Use when you have direct access to the Kubernetes API, and don't need large workflow or workflow archive support.

    -

    If you're using instance ID (which is very unlikely), you'll need to set it:

    -
    ARGO_INSTANCEID=your-instanceid
    -
    - -

    Argo Server GRPC Mode

    -

    Requests are sent to the Argo Server API via GRPC (using HTTP/2). Large workflows and the workflow archive are supported. Network load-balancers that do not support HTTP/2 are not supported.

    -

    Use if you do not have access to the Kubernetes API (e.g. you're in another cluster), and you're running the Argo Server using a network load-balancer that support HTTP/2.

    -

    To enable, set ARGO_SERVER:

    -
    ARGO_SERVER=localhost:2746 ;# The format is "host:port" - do not prefix with "http" or "https"
    -
    - -

    If you're have transport-layer security (TLS) enabled (i.e. you are running "argo server --secure" and therefore has HTTPS):

    -
    ARGO_SECURE=true
    -
    - -

    If your server is running with self-signed certificates. Do not use in production:

    -
    ARGO_INSECURE_SKIP_VERIFY=true
    -
    - -

    By default, the CLI uses your KUBECONFIG to determine default for ARGO_TOKEN and ARGO_NAMESPACE. You probably error with "no configuration has been provided". To prevent it:

    -
    KUBECONFIG=/dev/null
    -
    - -

    You will then need to set:

    -
    ARGO_NAMESPACE=argo
    -
    - -

    And:

    -
    ARGO_TOKEN='Bearer ******' ;# Should always start with "Bearer " or "Basic ".
    -
    - -

    Argo Server HTTP1 Mode

    -

    As per GRPC mode, but uses HTTP. Can be used with ALB that does not support HTTP/2. The command "argo logs --since-time=2020...." will not work (due to time-type).

    -

    Use this when your network load-balancer does not support HTTP/2.

    -

    Use the same configuration as GRPC mode, but also set:

    -
    ARGO_HTTP1=true
    -
    - -

    If your server is behind an ingress with a path (you'll be running "argo server --basehref /...) or "BASE_HREF=/... argo server"):

    -
    ARGO_BASE_HREF=/argo
    -
    - -
    argo [flags]
    -
    -

    Options

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -  -h, --help                           help for argo
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_archive/index.html b/cli/argo_archive/index.html index 0e7904f062db..af37d0507a38 100644 --- a/cli/argo_archive/index.html +++ b/cli/argo_archive/index.html @@ -1,4056 +1,68 @@ - - - - - - - - - - - - - argo archive - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo archive - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - -
    -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo archive

    - -

    argo archive

    -

    manage the workflow archive

    -
    argo archive [flags]
    -
    -

    Options

    -
      -h, --help   help for archive
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo archive - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_archive_delete/index.html b/cli/argo_archive_delete/index.html index 43829da63c79..8861060dd370 100644 --- a/cli/argo_archive_delete/index.html +++ b/cli/argo_archive_delete/index.html @@ -1,4049 +1,68 @@ - - - - - - - - - - - - - argo archive delete - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo archive delete - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - -
    -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo archive delete

    - -

    argo archive delete

    -

    delete a workflow in the archive

    -
    argo archive delete UID... [flags]
    -
    -

    Options

    -
      -h, --help   help for delete
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo archive delete - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_archive_get/index.html b/cli/argo_archive_get/index.html index 01128abb036d..a82669071321 100644 --- a/cli/argo_archive_get/index.html +++ b/cli/argo_archive_get/index.html @@ -1,4050 +1,68 @@ - - - - - - - - - - - - - argo archive get - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo archive get - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - -
    -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo archive get

    - -

    argo archive get

    -

    get a workflow in the archive

    -
    argo archive get UID [flags]
    -
    -

    Options

    -
      -h, --help            help for get
    -  -o, --output string   Output format. One of: json|yaml|wide (default "wide")
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo archive get - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_archive_list-label-keys/index.html b/cli/argo_archive_list-label-keys/index.html index 2842dbd71d34..bee00e7c72a4 100644 --- a/cli/argo_archive_list-label-keys/index.html +++ b/cli/argo_archive_list-label-keys/index.html @@ -1,4049 +1,68 @@ - - - - - - - - - - - - - argo archive list-label-keys - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo archive list-label-keys - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - -
    -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo archive list-label-keys

    - -

    argo archive list-label-keys

    -

    list workflows label keys in the archive

    -
    argo archive list-label-keys [flags]
    -
    -

    Options

    -
      -h, --help   help for list-label-keys
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo archive list-label-keys - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_archive_list-label-values/index.html b/cli/argo_archive_list-label-values/index.html index 914506bb3332..23ed69ccc695 100644 --- a/cli/argo_archive_list-label-values/index.html +++ b/cli/argo_archive_list-label-values/index.html @@ -1,4050 +1,68 @@ - - - - - - - - - - - - - argo archive list-label-values - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo archive list-label-values - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - -
    -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo archive list-label-values

    - -

    argo archive list-label-values

    -

    get workflow label values in the archive

    -
    argo archive list-label-values [flags]
    -
    -

    Options

    -
      -h, --help              help for list-label-values
    -  -l, --selector string   Selector (label query) to query on, allows 1 value (e.g. -l key1)
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo archive list-label-values - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_archive_list/index.html b/cli/argo_archive_list/index.html index 64ce7557d2a0..e25038b66e85 100644 --- a/cli/argo_archive_list/index.html +++ b/cli/argo_archive_list/index.html @@ -1,4052 +1,68 @@ - - - - - - - - - - - - - argo archive list - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo archive list - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - -
    -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo archive list

    - -

    argo archive list

    -

    list workflows in the archive

    -
    argo archive list [flags]
    -
    -

    Options

    -
          --chunk-size int    Return large lists in chunks rather than all at once. Pass 0 to disable.
    -  -h, --help              help for list
    -  -o, --output string     Output format. One of: json|yaml|wide (default "wide")
    -  -l, --selector string   Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo archive list - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_archive_resubmit/index.html b/cli/argo_archive_resubmit/index.html index 1d4a82b44a90..61226570006c 100644 --- a/cli/argo_archive_resubmit/index.html +++ b/cli/argo_archive_resubmit/index.html @@ -1,4101 +1,68 @@ - - - - - - - - - - - - - argo archive resubmit - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo archive resubmit - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - -
    -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo archive resubmit

    - -

    argo archive resubmit

    -

    resubmit one or more workflows

    -
    argo archive resubmit [WORKFLOW...] [flags]
    -
    -

    Examples

    -
    # Resubmit a workflow:
    -
    -  argo archive resubmit uid
    -
    -# Resubmit multiple workflows:
    -
    -  argo archive resubmit uid another-uid
    -
    -# Resubmit multiple workflows by label selector:
    -
    -  argo archive resubmit -l workflows.argoproj.io/test=true
    -
    -# Resubmit multiple workflows by field selector:
    -
    -  argo archive resubmit --field-selector metadata.namespace=argo
    -
    -# Resubmit and wait for completion:
    -
    -  argo archive resubmit --wait uid
    -
    -# Resubmit and watch until completion:
    -
    -  argo archive resubmit --watch uid
    -
    -# Resubmit and tail logs until completion:
    -
    -  argo archive resubmit --log uid
    -
    -

    Options

    -
          --field-selector string   Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.
    -  -h, --help                    help for resubmit
    -      --log                     log the workflow until it completes
    -      --memoized                re-use successful steps & outputs from the previous run
    -  -o, --output string           Output format. One of: name|json|yaml|wide
    -  -p, --parameter stringArray   input parameter to override on the original workflow spec
    -      --priority int32          workflow priority
    -  -l, --selector string         Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
    -  -w, --wait                    wait for the workflow to complete, only works when a single workflow is resubmitted
    -      --watch                   watch the workflow until it completes, only works when a single workflow is resubmitted
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo archive resubmit - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_archive_retry/index.html b/cli/argo_archive_retry/index.html index 4b6ef73f8cae..55d4222b7afe 100644 --- a/cli/argo_archive_retry/index.html +++ b/cli/argo_archive_retry/index.html @@ -1,4101 +1,68 @@ - - - - - - - - - - - - - argo archive retry - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo archive retry - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - -
    -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo archive retry

    - -

    argo archive retry

    -

    retry zero or more workflows

    -
    argo archive retry [WORKFLOW...] [flags]
    -
    -

    Examples

    -
    # Retry a workflow:
    -
    -  argo archive retry uid
    -
    -# Retry multiple workflows:
    -
    -  argo archive retry uid another-uid
    -
    -# Retry multiple workflows by label selector:
    -
    -  argo archive retry -l workflows.argoproj.io/test=true
    -
    -# Retry multiple workflows by field selector:
    -
    -  argo archive retry --field-selector metadata.namespace=argo
    -
    -# Retry and wait for completion:
    -
    -  argo archive retry --wait uid
    -
    -# Retry and watch until completion:
    -
    -  argo archive retry --watch uid
    -
    -# Retry and tail logs until completion:
    -
    -  argo archive retry --log uid
    -
    -

    Options

    -
          --field-selector string        Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.
    -  -h, --help                         help for retry
    -      --log                          log the workflow until it completes
    -      --node-field-selector string   selector of nodes to reset, eg: --node-field-selector inputs.paramaters.myparam.value=abc
    -  -o, --output string                Output format. One of: name|json|yaml|wide
    -  -p, --parameter stringArray        input parameter to override on the original workflow spec
    -      --restart-successful           indicates to restart successful nodes matching the --node-field-selector
    -  -l, --selector string              Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
    -  -w, --wait                         wait for the workflow to complete, only works when a single workflow is retried
    -      --watch                        watch the workflow until it completes, only works when a single workflow is retried
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo archive retry - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_auth/index.html b/cli/argo_auth/index.html index 8958ebeb4a2f..8c99362ffeea 100644 --- a/cli/argo_auth/index.html +++ b/cli/argo_auth/index.html @@ -1,4050 +1,68 @@ - - - - - - - - - - - - - argo auth - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo auth - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - -
    -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo auth

    - -

    argo auth

    -

    manage authentication settings

    -
    argo auth [flags]
    -
    -

    Options

    -
      -h, --help   help for auth
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo auth - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_auth_token/index.html b/cli/argo_auth_token/index.html index bb01cf0fbfab..76daa328dd4f 100644 --- a/cli/argo_auth_token/index.html +++ b/cli/argo_auth_token/index.html @@ -1,4049 +1,68 @@ - - - - - - - - - - - - - argo auth token - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo auth token - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - -
    -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo auth token

    - -

    argo auth token

    -

    Print the auth token

    -
    argo auth token [flags]
    -
    -

    Options

    -
      -h, --help   help for token
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    -
      -
    • argo auth - manage authentication settings
    • -
    - - - - -

    Comments

    - - +

    argo auth token - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_cluster-template/index.html b/cli/argo_cluster-template/index.html index 16fd0efcdbd2..ead2dcd042bd 100644 --- a/cli/argo_cluster-template/index.html +++ b/cli/argo_cluster-template/index.html @@ -1,4054 +1,68 @@ - - - - - - - - - - - - - argo cluster-template - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo cluster-template - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo cluster-template

    - -

    argo cluster-template

    -

    manipulate cluster workflow templates

    -
    argo cluster-template [flags]
    -
    -

    Options

    -
      -h, --help   help for cluster-template
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo cluster-template - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_cluster-template_create/index.html b/cli/argo_cluster-template_create/index.html index f54da7cfc88b..81111cc9f5ff 100644 --- a/cli/argo_cluster-template_create/index.html +++ b/cli/argo_cluster-template_create/index.html @@ -1,4075 +1,68 @@ - - - - - - - - - - - - - argo cluster-template create - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo cluster-template create - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo cluster-template create

    - -

    argo cluster-template create

    -

    create a cluster workflow template

    -
    argo cluster-template create FILE1 FILE2... [flags]
    -
    -

    Examples

    -
    # Create a Cluster Workflow Template:
    -  argo cluster-template create FILE1
    -
    -# Create a Cluster Workflow Template and print it as YAML:
    -  argo cluster-template create FILE1 --output yaml
    -
    -# Create a Cluster Workflow Template with relaxed validation:
    -  argo cluster-template create FILE1 --strict false
    -
    -

    Options

    -
      -h, --help            help for create
    -  -o, --output string   Output format. One of: name|json|yaml|wide
    -      --strict          perform strict workflow validation (default true)
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo cluster-template create - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_cluster-template_delete/index.html b/cli/argo_cluster-template_delete/index.html index e54956fc602a..32113a719e51 100644 --- a/cli/argo_cluster-template_delete/index.html +++ b/cli/argo_cluster-template_delete/index.html @@ -1,4050 +1,68 @@ - - - - - - - - - - - - - argo cluster-template delete - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo cluster-template delete - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo cluster-template delete

    - -

    argo cluster-template delete

    -

    delete a cluster workflow template

    -
    argo cluster-template delete WORKFLOW_TEMPLATE [flags]
    -
    -

    Options

    -
          --all    Delete all cluster workflow templates
    -  -h, --help   help for delete
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo cluster-template delete - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_cluster-template_get/index.html b/cli/argo_cluster-template_get/index.html index 9fa7467d8627..a94d501f522d 100644 --- a/cli/argo_cluster-template_get/index.html +++ b/cli/argo_cluster-template_get/index.html @@ -1,4050 +1,68 @@ - - - - - - - - - - - - - argo cluster-template get - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo cluster-template get - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo cluster-template get

    - -

    argo cluster-template get

    -

    display details about a cluster workflow template

    -
    argo cluster-template get CLUSTER WORKFLOW_TEMPLATE... [flags]
    -
    -

    Options

    -
      -h, --help            help for get
    -  -o, --output string   Output format. One of: json|yaml|wide
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo cluster-template get - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_cluster-template_lint/index.html b/cli/argo_cluster-template_lint/index.html index 91e3b9a1d15e..c2635a5d4ff3 100644 --- a/cli/argo_cluster-template_lint/index.html +++ b/cli/argo_cluster-template_lint/index.html @@ -1,4051 +1,68 @@ - - - - - - - - - - - - - argo cluster-template lint - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo cluster-template lint - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo cluster-template lint

    - -

    argo cluster-template lint

    -

    validate files or directories of cluster workflow template manifests

    -
    argo cluster-template lint FILE... [flags]
    -
    -

    Options

    -
      -h, --help            help for lint
    -  -o, --output string   Linting results output format. One of: pretty|simple (default "pretty")
    -      --strict          perform strict workflow validation (default true)
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo cluster-template lint - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_cluster-template_list/index.html b/cli/argo_cluster-template_list/index.html index 09883c8c113a..71016dd12314 100644 --- a/cli/argo_cluster-template_list/index.html +++ b/cli/argo_cluster-template_list/index.html @@ -1,4074 +1,68 @@ - - - - - - - - - - - - - argo cluster-template list - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo cluster-template list - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo cluster-template list

    - -

    argo cluster-template list

    -

    list cluster workflow templates

    -
    argo cluster-template list [flags]
    -
    -

    Examples

    -
    # List Cluster Workflow Templates:
    -  argo cluster-template list
    -
    -# List Cluster Workflow Templates with additional details such as labels, annotations, and status:
    -  argo cluster-template list --output wide
    -
    -# List Cluster Workflow Templates by name only:
    -  argo cluster-template list -o name
    -
    -

    Options

    -
      -h, --help            help for list
    -  -o, --output string   Output format. One of: wide|name
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo cluster-template list - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_completion/index.html b/cli/argo_completion/index.html index 8d7de59b0b61..87777a249936 100644 --- a/cli/argo_completion/index.html +++ b/cli/argo_completion/index.html @@ -1,4071 +1,68 @@ - - - - - - - - - - - - - argo completion - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo completion - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo completion

    - -

    argo completion

    -

    output shell completion code for the specified shell (bash or zsh)

    -

    Synopsis

    -

    Write bash or zsh shell completion code to standard output.

    -

    For bash, ensure you have bash completions installed and enabled. -To access completions in your current shell, run -$ source <(argo completion bash) -Alternatively, write it to a file and source in .bash_profile

    -

    For zsh, output to a file in a directory referenced by the $fpath shell -variable.

    -
    argo completion SHELL [flags]
    -
    -

    Options

    -
      -h, --help   help for completion
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    -
      -
    • argo - argo is the command line interface to Argo
    • -
    - - - - -

    Comments

    - - +

    argo completion - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_cp/index.html b/cli/argo_cp/index.html index e1e3bd9d4905..e77af9071bbe 100644 --- a/cli/argo_cp/index.html +++ b/cli/argo_cp/index.html @@ -1,4076 +1,68 @@ - - - - - - - - - - - - - argo cp - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo cp - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo cp

    - -

    argo cp

    -

    copy artifacts from workflow

    -
    argo cp my-wf output-directory ... [flags]
    -
    -

    Examples

    -
    # Copy a workflow's artifacts to a local output directory:
    -
    -  argo cp my-wf output-directory
    -
    -# Copy artifacts from a specific node in a workflow to a local output directory:
    -
    -  argo cp my-wf output-directory --node-id=my-wf-node-id-123
    -
    -

    Options

    -
          --artifact-name string   name of output artifact in workflow
    -  -h, --help                   help for cp
    -  -n, --namespace string       namespace of workflow
    -      --node-id string         id of node in workflow
    -      --path string            use variables {workflowName}, {nodeId}, {templateName}, {artifactName}, and {namespace} to create a customized path to store the artifacts; example: {workflowName}/{templateName}/{artifactName} (default "{namespace}/{workflowName}/{nodeId}/outputs/{artifactName}")
    -      --template-name string   name of template in workflow
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    -
      -
    • argo - argo is the command line interface to Argo
    • -
    - - - - -

    Comments

    - - +

    argo cp - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_cron/index.html b/cli/argo_cron/index.html index ee99158d8097..0f736887aed2 100644 --- a/cli/argo_cron/index.html +++ b/cli/argo_cron/index.html @@ -1,4072 +1,68 @@ - - - - - - - - - - - - - argo cron - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo cron - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo cron

    - -

    argo cron

    -

    manage cron workflows

    -

    Synopsis

    -

    NextScheduledRun assumes that the workflow-controller uses UTC as its timezone

    -
    argo cron [flags]
    -
    -

    Options

    -
      -h, --help   help for cron
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo cron - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_cron_create/index.html b/cli/argo_cron_create/index.html index f0d56f5ba49f..f811098087b0 100644 --- a/cli/argo_cron_create/index.html +++ b/cli/argo_cron_create/index.html @@ -1,4059 +1,68 @@ - - - - - - - - - - - - - argo cron create - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo cron create - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo cron create

    - -

    argo cron create

    -

    create a cron workflow

    -
    argo cron create FILE1 FILE2... [flags]
    -
    -

    Options

    -
          --entrypoint string       override entrypoint
    -      --generate-name string    override metadata.generateName
    -  -h, --help                    help for create
    -  -l, --labels string           Comma separated labels to apply to the workflow. Will override previous values.
    -      --name string             override metadata.name
    -  -o, --output string           Output format. One of: name|json|yaml|wide
    -  -p, --parameter stringArray   pass an input parameter
    -  -f, --parameter-file string   pass a file containing all input parameters
    -      --schedule string         override cron workflow schedule
    -      --serviceaccount string   run all pods in the workflow using specified serviceaccount
    -      --strict                  perform strict workflow validation (default true)
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo cron create - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_cron_delete/index.html b/cli/argo_cron_delete/index.html index 89dbdae0721e..70a6e9f4866b 100644 --- a/cli/argo_cron_delete/index.html +++ b/cli/argo_cron_delete/index.html @@ -1,4050 +1,68 @@ - - - - - - - - - - - - - argo cron delete - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo cron delete - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo cron delete

    - -

    argo cron delete

    -

    delete a cron workflow

    -
    argo cron delete [CRON_WORKFLOW... | --all] [flags]
    -
    -

    Options

    -
          --all    Delete all cron workflows
    -  -h, --help   help for delete
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo cron delete - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_cron_get/index.html b/cli/argo_cron_get/index.html index 93d2ba4874da..3c78c55ea7c9 100644 --- a/cli/argo_cron_get/index.html +++ b/cli/argo_cron_get/index.html @@ -1,4050 +1,68 @@ - - - - - - - - - - - - - argo cron get - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo cron get - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo cron get

    - -

    argo cron get

    -

    display details about a cron workflow

    -
    argo cron get CRON_WORKFLOW... [flags]
    -
    -

    Options

    -
      -h, --help            help for get
    -  -o, --output string   Output format. One of: json|yaml|wide
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo cron get - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_cron_lint/index.html b/cli/argo_cron_lint/index.html index b576e543d2e3..4e6ba656c1ca 100644 --- a/cli/argo_cron_lint/index.html +++ b/cli/argo_cron_lint/index.html @@ -1,4051 +1,68 @@ - - - - - - - - - - - - - argo cron lint - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo cron lint - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo cron lint

    - -

    argo cron lint

    -

    validate files or directories of cron workflow manifests

    -
    argo cron lint FILE... [flags]
    -
    -

    Options

    -
      -h, --help            help for lint
    -  -o, --output string   Linting results output format. One of: pretty|simple (default "pretty")
    -      --strict          perform strict validation (default true)
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo cron lint - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_cron_list/index.html b/cli/argo_cron_list/index.html index 7c9b28746a99..cc80c4d36eb2 100644 --- a/cli/argo_cron_list/index.html +++ b/cli/argo_cron_list/index.html @@ -1,4052 +1,68 @@ - - - - - - - - - - - - - argo cron list - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo cron list - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo cron list

    - -

    argo cron list

    -

    list cron workflows

    -
    argo cron list [flags]
    -
    -

    Options

    -
      -A, --all-namespaces    Show workflows from all namespaces
    -  -h, --help              help for list
    -  -o, --output string     Output format. One of: wide|name
    -  -l, --selector string   Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints.
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo cron list - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_cron_resume/index.html b/cli/argo_cron_resume/index.html index d8d933626d62..0f2d90ebd579 100644 --- a/cli/argo_cron_resume/index.html +++ b/cli/argo_cron_resume/index.html @@ -1,4049 +1,68 @@ - - - - - - - - - - - - - argo cron resume - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo cron resume - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo cron resume

    - -

    argo cron resume

    -

    resume zero or more cron workflows

    -
    argo cron resume [CRON_WORKFLOW...] [flags]
    -
    -

    Options

    -
      -h, --help   help for resume
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo cron resume - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_cron_suspend/index.html b/cli/argo_cron_suspend/index.html index 107634bc49cd..dbc8dee91a6a 100644 --- a/cli/argo_cron_suspend/index.html +++ b/cli/argo_cron_suspend/index.html @@ -1,4049 +1,68 @@ - - - - - - - - - - - - - argo cron suspend - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo cron suspend - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo cron suspend

    - -

    argo cron suspend

    -

    suspend zero or more cron workflows

    -
    argo cron suspend CRON_WORKFLOW... [flags]
    -
    -

    Options

    -
      -h, --help   help for suspend
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo cron suspend - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_delete/index.html b/cli/argo_delete/index.html index 38cf119a5dca..e07d9e44dfd8 100644 --- a/cli/argo_delete/index.html +++ b/cli/argo_delete/index.html @@ -1,4084 +1,68 @@ - - - - - - - - - - - - - argo delete - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo delete - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo delete

    - -

    argo delete

    -

    delete workflows

    -
    argo delete [--dry-run] [WORKFLOW...|[--all] [--older] [--completed] [--resubmitted] [--prefix PREFIX] [--selector SELECTOR] [--force] [--status STATUS] ] [flags]
    -
    -

    Examples

    -
    # Delete a workflow:
    -
    -  argo delete my-wf
    -
    -# Delete the latest workflow:
    -
    -  argo delete @latest
    -
    -

    Options

    -
          --all                     Delete all workflows
    -  -A, --all-namespaces          Delete workflows from all namespaces
    -      --completed               Delete completed workflows
    -      --dry-run                 Do not delete the workflow, only print what would happen
    -      --field-selector string   Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.
    -      --force                   Force delete workflows by removing finalizers
    -  -h, --help                    help for delete
    -      --older string            Delete completed workflows finished before the specified duration (e.g. 10m, 3h, 1d)
    -      --prefix string           Delete workflows by prefix
    -      --query-chunk-size int    Run the list query in chunks (deletes will still be executed individually)
    -      --resubmitted             Delete resubmitted workflows
    -  -l, --selector string         Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
    -      --status strings          Delete by status (comma separated)
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    -
      -
    • argo - argo is the command line interface to Argo
    • -
    - - - - -

    Comments

    - - +

    argo delete - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_executor-plugin/index.html b/cli/argo_executor-plugin/index.html index 00ecd0e83089..2704520c6679 100644 --- a/cli/argo_executor-plugin/index.html +++ b/cli/argo_executor-plugin/index.html @@ -1,4050 +1,68 @@ - - - - - - - - - - - - - argo executor-plugin - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo executor-plugin - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo executor-plugin

    - -

    argo executor-plugin

    -

    manage executor plugins

    -
    argo executor-plugin [flags]
    -
    -

    Options

    -
      -h, --help   help for executor-plugin
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo executor-plugin - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_executor-plugin_build/index.html b/cli/argo_executor-plugin_build/index.html index a5c78dd26764..70ea285d633c 100644 --- a/cli/argo_executor-plugin_build/index.html +++ b/cli/argo_executor-plugin_build/index.html @@ -1,4049 +1,68 @@ - - - - - - - - - - - - - argo executor-plugin build - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo executor-plugin build - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo executor-plugin build

    - -

    argo executor-plugin build

    -

    build an executor plugin

    -
    argo executor-plugin build DIR [flags]
    -
    -

    Options

    -
      -h, --help   help for build
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo executor-plugin build - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_get/index.html b/cli/argo_get/index.html index 42f0d6745020..89e45b51d0f0 100644 --- a/cli/argo_get/index.html +++ b/cli/argo_get/index.html @@ -1,4076 +1,68 @@ - - - - - - - - - - - - - argo get - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo get - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo get

    - -

    argo get

    -

    display details about a workflow

    -
    argo get WORKFLOW... [flags]
    -
    -

    Examples

    -
    # Get information about a workflow:
    -
    -  argo get my-wf
    -
    -# Get the latest workflow:
    -  argo get @latest
    -
    -

    Options

    -
      -h, --help                         help for get
    -      --no-color                     Disable colorized output
    -      --no-utf8                      Use plain 7-bits ascii characters
    -      --node-field-selector string   selector of node to display, eg: --node-field-selector phase=abc
    -  -o, --output string                Output format. One of: json|yaml|short|wide
    -      --status string                Filter by status (Pending, Running, Succeeded, Skipped, Failed, Error)
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    -
      -
    • argo - argo is the command line interface to Argo
    • -
    - - - - -

    Comments

    - - +

    argo get - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_lint/index.html b/cli/argo_lint/index.html index a1fae2858f8f..c811cbf7bc6f 100644 --- a/cli/argo_lint/index.html +++ b/cli/argo_lint/index.html @@ -1,4076 +1,68 @@ - - - - - - - - - - - - - argo lint - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo lint - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo lint

    - -

    argo lint

    -

    validate files or directories of manifests

    -
    argo lint FILE... [flags]
    -
    -

    Examples

    -
    # Lint all manifests in a specified directory:
    -
    -  argo lint ./manifests
    -
    -# Lint only manifests of Workflows and CronWorkflows from stdin:
    -
    -  cat manifests.yaml | argo lint --kinds=workflows,cronworkflows -
    -
    -

    Options

    -
      -h, --help            help for lint
    -      --kinds strings   Which kinds will be linted. Can be: workflows|workflowtemplates|cronworkflows|clusterworkflowtemplates (default [all])
    -      --offline         perform offline linting. For resources referencing other resources, the references will be resolved from the provided args
    -  -o, --output string   Linting results output format. One of: pretty|simple (default "pretty")
    -      --strict          Perform strict workflow validation (default true)
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    -
      -
    • argo - argo is the command line interface to Argo
    • -
    - - - - -

    Comments

    - - +

    argo lint - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_list/index.html b/cli/argo_list/index.html index cae23719382f..5516f7787360 100644 --- a/cli/argo_list/index.html +++ b/cli/argo_list/index.html @@ -1,4104 +1,68 @@ - - - - - - - - - - - - - argo list - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo list - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo list

    - -

    argo list

    -

    list workflows

    -
    argo list [flags]
    -
    -

    Examples

    -
    # List all workflows:
    -  argo list
    -
    -# List all workflows from all namespaces:
    -  argo list -A
    -
    -# List all running workflows:
    -  argo list --running
    -
    -# List all completed workflows:
    -  argo list --completed
    -
    - # List workflows created within the last 10m:
    -  argo list --since 10m
    -
    -# List workflows that finished more than 2h ago:
    -  argo list --older 2h
    -
    -# List workflows with more information (such as parameters):
    -  argo list -o wide
    -
    -# List workflows in YAML format:
    -  argo list -o yaml
    -
    -# List workflows that have both labels:
    -  argo list -l label1=value1,label2=value2
    -
    -

    Options

    -
      -A, --all-namespaces          Show workflows from all namespaces
    -      --chunk-size int          Return large lists in chunks rather than all at once. Pass 0 to disable.
    -      --completed               Show completed workflows. Mutually exclusive with --running.
    -      --field-selector string   Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.
    -  -h, --help                    help for list
    -      --no-headers              Don't print headers (default print headers).
    -      --older string            List completed workflows finished before the specified duration (e.g. 10m, 3h, 1d)
    -  -o, --output string           Output format. One of: name|wide|yaml|json
    -      --prefix string           Filter workflows by prefix
    -      --resubmitted             Show resubmitted workflows
    -      --running                 Show running workflows. Mutually exclusive with --completed.
    -  -l, --selector string         Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
    -      --since string            Show only workflows created after than a relative duration
    -      --status strings          Filter by status (comma separated)
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    -
      -
    • argo - argo is the command line interface to Argo
    • -
    - - - - -

    Comments

    - - +

    argo list - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_logs/index.html b/cli/argo_logs/index.html index 991ec46ae2da..b1625f9903cf 100644 --- a/cli/argo_logs/index.html +++ b/cli/argo_logs/index.html @@ -1,4101 +1,68 @@ - - - - - - - - - - - - - argo logs - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo logs - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo logs

    - -

    argo logs

    -

    view logs of a pod or workflow

    -
    argo logs WORKFLOW [POD] [flags]
    -
    -

    Examples

    -
    # Print the logs of a workflow:
    -
    -  argo logs my-wf
    -
    -# Follow the logs of a workflows:
    -
    -  argo logs my-wf --follow
    -
    -# Print the logs of a workflows with a selector:
    -
    -  argo logs my-wf -l app=sth
    -
    -# Print the logs of single container in a pod
    -
    -  argo logs my-wf my-pod -c my-container
    -
    -# Print the logs of a workflow's pods:
    -
    -  argo logs my-wf my-pod
    -
    -# Print the logs of a pods:
    -
    -  argo logs --since=1h my-pod
    -
    -# Print the logs of the latest workflow:
    -  argo logs @latest
    -
    -

    Options

    -
      -c, --container string    Print the logs of this container (default "main")
    -  -f, --follow              Specify if the logs should be streamed.
    -      --grep string         grep for lines
    -  -h, --help                help for logs
    -      --no-color            Disable colorized output
    -  -p, --previous            Specify if the previously terminated container logs should be returned.
    -  -l, --selector string     log selector for some pod
    -      --since duration      Only return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs. Only one of since-time / since may be used.
    -      --since-time string   Only return logs after a specific date (RFC3339). Defaults to all logs. Only one of since-time / since may be used.
    -      --tail int            If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime (default -1)
    -      --timestamps          Include timestamps on each line in the log output
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    -
      -
    • argo - argo is the command line interface to Argo
    • -
    - - - - -

    Comments

    - - +

    argo logs - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_node/index.html b/cli/argo_node/index.html index 94dfb28e7d5c..613dcf151d32 100644 --- a/cli/argo_node/index.html +++ b/cli/argo_node/index.html @@ -1,4076 +1,68 @@ - - - - - - - - - - - - - argo node - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo node - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo node

    - -

    argo node

    -

    perform action on a node in a workflow

    -
    argo node ACTION WORKFLOW FLAGS [flags]
    -
    -

    Examples

    -
    # Set outputs to a node within a workflow:
    -
    -  argo node set my-wf --output-parameter parameter-name="Hello, world!" --node-field-selector displayName=approve
    -
    -# Set the message of a node within a workflow:
    -
    -  argo node set my-wf --message "We did it!"" --node-field-selector displayName=approve
    -
    -

    Options

    -
      -h, --help                           help for node
    -  -m, --message string                 Set the message of a node, eg: --message "Hello, world!"
    -      --node-field-selector string     Selector of node to set, eg: --node-field-selector inputs.paramaters.myparam.value=abc
    -  -p, --output-parameter stringArray   Set a "supplied" output parameter of node, eg: --output-parameter parameter-name="Hello, world!"
    -      --phase string                   Phase to set the node to, eg: --phase Succeeded
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    -
      -
    • argo - argo is the command line interface to Argo
    • -
    - - - - -

    Comments

    - - +

    argo node - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_resubmit/index.html b/cli/argo_resubmit/index.html index 931881a1c7c3..4fbbb3ecfa4c 100644 --- a/cli/argo_resubmit/index.html +++ b/cli/argo_resubmit/index.html @@ -1,4121 +1,68 @@ - - - - - - - - - - - - - argo resubmit - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo resubmit - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo resubmit

    - -

    argo resubmit

    -

    resubmit one or more workflows

    -

    Synopsis

    -

    Submit a completed workflow again. Optionally override parameters and memoize. Similar to running argo submit again with the same parameters.

    -
    argo resubmit [WORKFLOW...] [flags]
    -
    -

    Examples

    -
    # Resubmit a workflow:
    -
    -  argo resubmit my-wf
    -
    -# Resubmit multiple workflows:
    -
    -  argo resubmit my-wf my-other-wf my-third-wf
    -
    -# Resubmit multiple workflows by label selector:
    -
    -  argo resubmit -l workflows.argoproj.io/test=true
    -
    -# Resubmit multiple workflows by field selector:
    -
    -  argo resubmit --field-selector metadata.namespace=argo
    -
    -# Resubmit and wait for completion:
    -
    -  argo resubmit --wait my-wf.yaml
    -
    -# Resubmit and watch until completion:
    -
    -  argo resubmit --watch my-wf.yaml
    -
    -# Resubmit and tail logs until completion:
    -
    -  argo resubmit --log my-wf.yaml
    -
    -# Resubmit the latest workflow:
    -
    -  argo resubmit @latest
    -
    -

    Options

    -
          --field-selector string   Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.
    -  -h, --help                    help for resubmit
    -      --log                     log the workflow until it completes
    -      --memoized                re-use successful steps & outputs from the previous run
    -  -o, --output string           Output format. One of: name|json|yaml|wide
    -  -p, --parameter stringArray   input parameter to override on the original workflow spec
    -      --priority int32          workflow priority
    -  -l, --selector string         Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
    -  -w, --wait                    wait for the workflow to complete, only works when a single workflow is resubmitted
    -      --watch                   watch the workflow until it completes, only works when a single workflow is resubmitted
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    -
      -
    • argo - argo is the command line interface to Argo
    • -
    - - - - -

    Comments

    - - +

    argo resubmit - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_resume/index.html b/cli/argo_resume/index.html index 192729da9a82..521c9c12a068 100644 --- a/cli/argo_resume/index.html +++ b/cli/argo_resume/index.html @@ -1,4081 +1,68 @@ - - - - - - - - - - - - - argo resume - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo resume - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo resume

    - -

    argo resume

    -

    resume zero or more workflows (opposite of suspend)

    -
    argo resume WORKFLOW1 WORKFLOW2... [flags]
    -
    -

    Examples

    -
    # Resume a workflow that has been suspended:
    -
    -  argo resume my-wf
    -
    -# Resume multiple workflows:
    -
    -  argo resume my-wf my-other-wf my-third-wf     
    -
    -# Resume the latest workflow:
    -
    -  argo resume @latest
    -
    -# Resume multiple workflows by node field selector:
    -
    -  argo resume --node-field-selector inputs.paramaters.myparam.value=abc     
    -
    -

    Options

    -
      -h, --help                         help for resume
    -      --node-field-selector string   selector of node to resume, eg: --node-field-selector inputs.paramaters.myparam.value=abc
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    -
      -
    • argo - argo is the command line interface to Argo
    • -
    - - - - -

    Comments

    - - +

    argo resume - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_retry/index.html b/cli/argo_retry/index.html index 47f0981a89b1..908c54fa458d 100644 --- a/cli/argo_retry/index.html +++ b/cli/argo_retry/index.html @@ -1,4124 +1,68 @@ - - - - - - - - - - - - - argo retry - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo retry - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo retry

    - -

    argo retry

    -

    retry zero or more workflows

    -

    Synopsis

    -

    Rerun a failed Workflow. Specifically, rerun all failed steps. The same Workflow object is used and no new Workflows are created.

    -
    argo retry [WORKFLOW...] [flags]
    -
    -

    Examples

    -
    # Retry a workflow:
    -
    -  argo retry my-wf
    -
    -# Retry multiple workflows:
    -
    -  argo retry my-wf my-other-wf my-third-wf
    -
    -# Retry multiple workflows by label selector:
    -
    -  argo retry -l workflows.argoproj.io/test=true
    -
    -# Retry multiple workflows by field selector:
    -
    -  argo retry --field-selector metadata.namespace=argo
    -
    -# Retry and wait for completion:
    -
    -  argo retry --wait my-wf.yaml
    -
    -# Retry and watch until completion:
    -
    -  argo retry --watch my-wf.yaml
    -
    -# Retry and tail logs until completion:
    -
    -  argo retry --log my-wf.yaml
    -
    -# Retry the latest workflow:
    -
    -  argo retry @latest
    -
    -# Restart node with id 5 on successful workflow, using node-field-selector
    -  argo retry my-wf --restart-successful --node-field-selector id=5
    -
    -

    Options

    -
          --field-selector string        Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.
    -  -h, --help                         help for retry
    -      --log                          log the workflow until it completes
    -      --node-field-selector string   selector of nodes to reset, eg: --node-field-selector inputs.paramaters.myparam.value=abc
    -  -o, --output string                Output format. One of: name|json|yaml|wide
    -  -p, --parameter stringArray        input parameter to override on the original workflow spec
    -      --restart-successful           indicates to restart successful nodes matching the --node-field-selector
    -  -l, --selector string              Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
    -  -w, --wait                         wait for the workflow to complete, only works when a single workflow is retried
    -      --watch                        watch the workflow until it completes, only works when a single workflow is retried
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    -
      -
    • argo - argo is the command line interface to Argo
    • -
    - - - - -

    Comments

    - - +

    argo retry - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_server/index.html b/cli/argo_server/index.html index 3c55e451ac7f..da4a3d4a43ab 100644 --- a/cli/argo_server/index.html +++ b/cli/argo_server/index.html @@ -1,4085 +1,68 @@ - - - - - - - - - - - - - argo server - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo server - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo server

    - -

    argo server

    -

    start the Argo Server

    -
    argo server [flags]
    -
    -

    Examples

    -
    See https://argoproj.github.io/argo-workflows/argo-server/
    -
    -

    Options

    -
          --access-control-allow-origin string   Set Access-Control-Allow-Origin header in HTTP responses.
    -      --allowed-link-protocol stringArray    Allowed link protocol in configMap. Used if the allowed configMap links protocol are different from http,https. Defaults to the environment variable ALLOWED_LINK_PROTOCOL (default [http,https])
    -      --api-rate-limit uint                  Set limit per IP for api ratelimiter (default 1000)
    -      --auth-mode stringArray                API server authentication mode. Any 1 or more length permutation of: client,server,sso (default [client])
    -      --basehref string                      Value for base href in index.html. Used if the server is running behind reverse proxy under subpath different from /. Defaults to the environment variable BASE_HREF. (default "/")
    -  -b, --browser                              enable automatic launching of the browser [local mode]
    -      --configmap string                     Name of K8s configmap to retrieve workflow controller configuration (default "workflow-controller-configmap")
    -      --event-async-dispatch                 dispatch event async
    -      --event-operation-queue-size int       how many events operations that can be queued at once (default 16)
    -      --event-worker-count int               how many event workers to run (default 4)
    -  -h, --help                                 help for server
    -      --hsts                                 Whether or not we should add a HTTP Secure Transport Security header. This only has effect if secure is enabled. (default true)
    -      --kube-api-burst int                   Burst to use while talking with kube-apiserver. (default 30)
    -      --kube-api-qps float32                 QPS to use while talking with kube-apiserver. (default 20)
    -      --log-format string                    The formatter to use for logs. One of: text|json (default "text")
    -      --managed-namespace string             namespace that watches, default to the installation namespace
    -      --namespaced                           run as namespaced mode
    -  -p, --port int                             Port to listen on (default 2746)
    -  -e, --secure                               Whether or not we should listen on TLS. (default true)
    -      --tls-certificate-secret-name string   The name of a Kubernetes secret that contains the server certificates
    -      --x-frame-options string               Set X-Frame-Options header in HTTP responses. (default "DENY")
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    -
      -
    • argo - argo is the command line interface to Argo
    • -
    - - - - -

    Comments

    - - +

    argo server - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_stop/index.html b/cli/argo_stop/index.html index e66999d4d621..bdbdfbeab54f 100644 --- a/cli/argo_stop/index.html +++ b/cli/argo_stop/index.html @@ -1,4101 +1,68 @@ - - - - - - - - - - - - - argo stop - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo stop - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo stop

    - -

    argo stop

    -

    stop zero or more workflows allowing all exit handlers to run

    -

    Synopsis

    -

    Stop a workflow but still run exit handlers.

    -
    argo stop WORKFLOW WORKFLOW2... [flags]
    -
    -

    Examples

    -
    # Stop a workflow:
    -
    -  argo stop my-wf
    -
    -# Stop the latest workflow:
    -
    -  argo stop @latest
    -
    -# Stop multiple workflows by label selector
    -
    -  argo stop -l workflows.argoproj.io/test=true
    -
    -# Stop multiple workflows by field selector
    -
    -  argo stop --field-selector metadata.namespace=argo
    -
    -

    Options

    -
          --dry-run                      If true, only print the workflows that would be stopped, without stopping them.
    -      --field-selector string        Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.
    -  -h, --help                         help for stop
    -      --message string               Message to add to previously running nodes
    -      --node-field-selector string   selector of node to stop, eg: --node-field-selector inputs.paramaters.myparam.value=abc
    -  -l, --selector string              Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    -
      -
    • argo - argo is the command line interface to Argo
    • -
    - - - - -

    Comments

    - - +

    argo stop - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_submit/index.html b/cli/argo_submit/index.html index 8a9ea11e1d7e..ca1d92af3c1d 100644 --- a/cli/argo_submit/index.html +++ b/cli/argo_submit/index.html @@ -1,4103 +1,68 @@ - - - - - - - - - - - - - argo submit - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo submit - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo submit

    - -

    argo submit

    -

    submit a workflow

    -
    argo submit [FILE... | --from `kind/name] [flags]
    -
    -

    Examples

    -
    # Submit multiple workflows from files:
    -
    -  argo submit my-wf.yaml
    -
    -# Submit and wait for completion:
    -
    -  argo submit --wait my-wf.yaml
    -
    -# Submit and watch until completion:
    -
    -  argo submit --watch my-wf.yaml
    -
    -# Submit and tail logs until completion:
    -
    -  argo submit --log my-wf.yaml
    -
    -# Submit a single workflow from an existing resource
    -
    -  argo submit --from cronwf/my-cron-wf
    -
    -

    Options

    -
          --dry-run                      modify the workflow on the client-side without creating it
    -      --entrypoint string            override entrypoint
    -      --from kind/name               Submit from an existing kind/name E.g., --from=cronwf/hello-world-cwf
    -      --generate-name string         override metadata.generateName
    -  -h, --help                         help for submit
    -  -l, --labels string                Comma separated labels to apply to the workflow. Will override previous values.
    -      --log                          log the workflow until it completes
    -      --name string                  override metadata.name
    -      --node-field-selector string   selector of node to display, eg: --node-field-selector phase=abc
    -  -o, --output string                Output format. One of: name|json|yaml|wide
    -  -p, --parameter stringArray        pass an input parameter
    -  -f, --parameter-file string        pass a file containing all input parameters
    -      --priority int32               workflow priority
    -      --scheduled-time string        Override the workflow's scheduledTime parameter (useful for backfilling). The time must be RFC3339
    -      --server-dry-run               send request to server with dry-run flag which will modify the workflow without creating it
    -      --serviceaccount string        run all pods in the workflow using specified serviceaccount
    -      --status string                Filter by status (Pending, Running, Succeeded, Skipped, Failed, Error). Should only be used with --watch.
    -      --strict                       perform strict workflow validation (default true)
    -  -w, --wait                         wait for the workflow to complete
    -      --watch                        watch the workflow until it completes
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    -
      -
    • argo - argo is the command line interface to Argo
    • -
    - - - - -

    Comments

    - - +

    argo submit - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_suspend/index.html b/cli/argo_suspend/index.html index 41bb6710159f..716a15e66b87 100644 --- a/cli/argo_suspend/index.html +++ b/cli/argo_suspend/index.html @@ -1,4071 +1,68 @@ - - - - - - - - - - - - - argo suspend - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo suspend - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo suspend

    - -

    argo suspend

    -

    suspend zero or more workflows (opposite of resume)

    -
    argo suspend WORKFLOW1 WORKFLOW2... [flags]
    -
    -

    Examples

    -
    # Suspend a workflow:
    -
    -  argo suspend my-wf
    -
    -# Suspend the latest workflow:
    -  argo suspend @latest
    -
    -

    Options

    -
      -h, --help   help for suspend
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    -
      -
    • argo - argo is the command line interface to Argo
    • -
    - - - - -

    Comments

    - - +

    argo suspend - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_template/index.html b/cli/argo_template/index.html index 3874c1252dd1..50887f5c8aa8 100644 --- a/cli/argo_template/index.html +++ b/cli/argo_template/index.html @@ -1,4054 +1,68 @@ - - - - - - - - - - - - - argo template - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo template - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo template

    - -

    argo template

    -

    manipulate workflow templates

    -
    argo template [flags]
    -
    -

    Options

    -
      -h, --help   help for template
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo template - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_template_create/index.html b/cli/argo_template_create/index.html index e1ed72e9bdb3..c2a96f7df8bc 100644 --- a/cli/argo_template_create/index.html +++ b/cli/argo_template_create/index.html @@ -1,4051 +1,68 @@ - - - - - - - - - - - - - argo template create - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo template create - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo template create

    - -

    argo template create

    -

    create a workflow template

    -
    argo template create FILE1 FILE2... [flags]
    -
    -

    Options

    -
      -h, --help            help for create
    -  -o, --output string   Output format. One of: name|json|yaml|wide
    -      --strict          perform strict workflow validation (default true)
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo template create - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_template_delete/index.html b/cli/argo_template_delete/index.html index 17d1bfcb9333..f49b1b74a067 100644 --- a/cli/argo_template_delete/index.html +++ b/cli/argo_template_delete/index.html @@ -1,4050 +1,68 @@ - - - - - - - - - - - - - argo template delete - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo template delete - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo template delete

    - -

    argo template delete

    -

    delete a workflow template

    -
    argo template delete WORKFLOW_TEMPLATE [flags]
    -
    -

    Options

    -
          --all    Delete all workflow templates
    -  -h, --help   help for delete
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo template delete - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_template_get/index.html b/cli/argo_template_get/index.html index e08a9d2d8057..2fb3f112dc3d 100644 --- a/cli/argo_template_get/index.html +++ b/cli/argo_template_get/index.html @@ -1,4050 +1,68 @@ - - - - - - - - - - - - - argo template get - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo template get - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo template get

    - -

    argo template get

    -

    display details about a workflow template

    -
    argo template get WORKFLOW_TEMPLATE... [flags]
    -
    -

    Options

    -
      -h, --help            help for get
    -  -o, --output string   Output format. One of: json|yaml|wide
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo template get - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_template_lint/index.html b/cli/argo_template_lint/index.html index 0d6dd21f62dc..e79c27da111f 100644 --- a/cli/argo_template_lint/index.html +++ b/cli/argo_template_lint/index.html @@ -1,4051 +1,68 @@ - - - - - - - - - - - - - argo template lint - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo template lint - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo template lint

    - -

    argo template lint

    -

    validate a file or directory of workflow template manifests

    -
    argo template lint (DIRECTORY | FILE1 FILE2 FILE3...) [flags]
    -
    -

    Options

    -
      -h, --help            help for lint
    -  -o, --output string   Linting results output format. One of: pretty|simple (default "pretty")
    -      --strict          perform strict workflow validation (default true)
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo template lint - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_template_list/index.html b/cli/argo_template_list/index.html index f64b0eb8848d..0bc0e10c16ae 100644 --- a/cli/argo_template_list/index.html +++ b/cli/argo_template_list/index.html @@ -1,4051 +1,68 @@ - - - - - - - - - - - - - argo template list - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo template list - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo template list

    - -

    argo template list

    -

    list workflow templates

    -
    argo template list [flags]
    -
    -

    Options

    -
      -A, --all-namespaces   Show workflows from all namespaces
    -  -h, --help             help for list
    -  -o, --output string    Output format. One of: wide|name
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    - - - - - -

    Comments

    - - +

    argo template list - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_terminate/index.html b/cli/argo_terminate/index.html index a959f7e51c98..eed391134374 100644 --- a/cli/argo_terminate/index.html +++ b/cli/argo_terminate/index.html @@ -1,4099 +1,68 @@ - - - - - - - - - - - - - argo terminate - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo terminate - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo terminate

    - -

    argo terminate

    -

    terminate zero or more workflows immediately

    -

    Synopsis

    -

    Immediately stop a workflow and do not run any exit handlers.

    -
    argo terminate WORKFLOW WORKFLOW2... [flags]
    -
    -

    Examples

    -
    # Terminate a workflow:
    -
    -  argo terminate my-wf
    -
    -# Terminate the latest workflow:
    -
    -  argo terminate @latest
    -
    -# Terminate multiple workflows by label selector
    -
    -  argo terminate -l workflows.argoproj.io/test=true
    -
    -# Terminate multiple workflows by field selector
    -
    -  argo terminate --field-selector metadata.namespace=argo
    -
    -

    Options

    -
          --dry-run                 Do not terminate the workflow, only print what would happen
    -      --field-selector string   Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.
    -  -h, --help                    help for terminate
    -  -l, --selector string         Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    -
      -
    • argo - argo is the command line interface to Argo
    • -
    - - - - -

    Comments

    - - +

    argo terminate - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_version/index.html b/cli/argo_version/index.html index 59825e0337d1..82b3ba9a26bf 100644 --- a/cli/argo_version/index.html +++ b/cli/argo_version/index.html @@ -1,4050 +1,68 @@ - - - - - - - - - - - - - argo version - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo version - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo version

    - -

    argo version

    -

    print version information

    -
    argo version [flags]
    -
    -

    Options

    -
      -h, --help    help for version
    -      --short   print just the version number
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    -
      -
    • argo - argo is the command line interface to Argo
    • -
    - - - - -

    Comments

    - - +

    argo version - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_wait/index.html b/cli/argo_wait/index.html index 69ce3a643040..efae90489d7b 100644 --- a/cli/argo_wait/index.html +++ b/cli/argo_wait/index.html @@ -1,4073 +1,68 @@ - - - - - - - - - - - - - argo wait - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo wait - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo wait

    - -

    argo wait

    -

    waits for workflows to complete

    -
    argo wait [WORKFLOW...] [flags]
    -
    -

    Examples

    -
    # Wait on a workflow:
    -
    -  argo wait my-wf
    -
    -# Wait on the latest workflow:
    -
    -  argo wait @latest
    -
    -

    Options

    -
      -h, --help               help for wait
    -      --ignore-not-found   Ignore the wait if the workflow is not found
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    -
      -
    • argo - argo is the command line interface to Argo
    • -
    - - - - -

    Comments

    - - +

    argo wait - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cli/argo_watch/index.html b/cli/argo_watch/index.html index ac40eb3a8126..e0c64fdd6c71 100644 --- a/cli/argo_watch/index.html +++ b/cli/argo_watch/index.html @@ -1,4074 +1,68 @@ - - - - - - - - - - - - - argo watch - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + argo watch - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    argo watch

    - -

    argo watch

    -

    watch a workflow until it completes

    -
    argo watch WORKFLOW [flags]
    -
    -

    Examples

    -
    # Watch a workflow:
    -
    -  argo watch my-wf
    -
    -# Watch the latest workflow:
    -
    -  argo watch @latest
    -
    -

    Options

    -
      -h, --help                         help for watch
    -      --node-field-selector string   selector of node to display, eg: --node-field-selector phase=abc
    -      --status string                Filter by status (Pending, Running, Succeeded, Skipped, Failed, Error)
    -
    -

    Options inherited from parent commands

    -
          --argo-base-href string          An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
    -      --argo-http1                     If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
    -  -s, --argo-server host:port          API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
    -      --as string                      Username to impersonate for the operation
    -      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
    -      --as-uid string                  UID to impersonate for the operation
    -      --certificate-authority string   Path to a cert file for the certificate authority
    -      --client-certificate string      Path to a client certificate file for TLS
    -      --client-key string              Path to a client key file for TLS
    -      --cluster string                 The name of the kubeconfig cluster to use
    -      --context string                 The name of the kubeconfig context to use
    -      --gloglevel int                  Set the glog logging level
    -  -H, --header strings                 Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
    -      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
    -  -k, --insecure-skip-verify           If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
    -      --instanceid string              submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
    -      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
    -      --loglevel string                Set the logging level. One of: debug|info|warn|error (default "info")
    -  -n, --namespace string               If present, the namespace scope for this CLI request
    -      --password string                Password for basic authentication to the API server
    -      --proxy-url string               If provided, this URL will be used to connect via proxy
    -      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
    -  -e, --secure                         Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
    -      --server string                  The address and port of the Kubernetes API server
    -      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
    -      --token string                   Bearer token for authentication to the API server
    -      --user string                    The name of the kubeconfig user to use
    -      --username string                Username for basic authentication to the API server
    -  -v, --verbose                        Enabled verbose logging, i.e. --loglevel debug
    -
    -

    SEE ALSO

    -
      -
    • argo - argo is the command line interface to Argo
    • -
    - - - - -

    Comments

    - - +

    argo watch - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/client-libraries/index.html b/client-libraries/index.html index 1106c22465ad..8726f5103ffb 100644 --- a/client-libraries/index.html +++ b/client-libraries/index.html @@ -1,4028 +1,68 @@ - - - - - - - - - - - - - Client Libraries - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Client Libraries - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Client Libraries

    -

    This page contains an overview of the client libraries for using the Argo API from various programming languages.

    -

    To write applications using the REST API, you do not need to implement the API calls and request/response types -yourself. You can use a client library for the programming language you are using.

    -

    Client libraries often handle common tasks such as authentication for you.

    -

    Auto-generated client libraries

    -

    The following client libraries are auto-generated using OpenAPI Generator. -Please expect very minimal support from the Argo team.

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    LanguageClient LibraryExamples/Docs
    Golangapiclient.goExample
    JavaJava
    PythonPython
    -

    Community-maintained client libraries

    -

    The following client libraries are provided and maintained by their authors, not the Argo team.

    - - - - - - - - - - - - - - - - - - - - -
    LanguageClient LibraryExamples/Docs
    PythonCoulerMulti-workflow engine support Python SDK
    PythonHeraEasy and accessible Argo workflows construction and submission in Python
    - - - - -

    Comments

    - - +

    Client Libraries - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cluster-workflow-templates/index.html b/cluster-workflow-templates/index.html index f7d95e0dee4e..0129164e1cc8 100644 --- a/cluster-workflow-templates/index.html +++ b/cluster-workflow-templates/index.html @@ -1,4195 +1,68 @@ - - - - - - - - - - - - - Cluster Workflow Templates - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Cluster Workflow Templates - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Cluster Workflow Templates

    -
    -

    v2.8 and after

    -
    -

    Introduction

    -

    ClusterWorkflowTemplates are cluster scoped WorkflowTemplates. ClusterWorkflowTemplate -can be created cluster scoped like ClusterRole and can be accessed across all namespaces in the cluster.

    -

    WorkflowTemplates documentation link

    -

    Defining ClusterWorkflowTemplate

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: ClusterWorkflowTemplate
    -metadata:
    -  name: cluster-workflow-template-whalesay-template
    -spec:
    -  templates:
    -  - name: whalesay-template
    -    inputs:
    -      parameters:
    -      - name: message
    -    container:
    -      image: docker/whalesay
    -      command: [cowsay]
    -      args: ["{{inputs.parameters.message}}"]
    -
    -

    Referencing other ClusterWorkflowTemplates

    -

    You can reference templates from other ClusterWorkflowTemplates using a templateRef field with clusterScope: true . -Just as how you reference other templates within the same Workflow, you should do so from a steps or dag template.

    -

    Here is an example:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: workflow-template-hello-world-
    -spec:
    -  entrypoint: whalesay
    -  templates:
    -  - name: whalesay
    -    steps:                              # You should only reference external "templates" in a "steps" or "dag" "template".
    -      - - name: call-whalesay-template
    -          templateRef:                  # You can reference a "template" from another "WorkflowTemplate or ClusterWorkflowTemplate" using this field
    -            name: cluster-workflow-template-whalesay-template   # This is the name of the "WorkflowTemplate or ClusterWorkflowTemplate" CRD that contains the "template" you want
    -            template: whalesay-template # This is the name of the "template" you want to reference
    -            clusterScope: true          # This field indicates this templateRef is pointing ClusterWorkflowTemplate
    -          arguments:                    # You can pass in arguments as normal
    -            parameters:
    -            - name: message
    -              value: "hello world"
    -
    -
    -

    2.9 and after

    -
    -

    Create Workflow from ClusterWorkflowTemplate Spec

    -

    You can create Workflow from ClusterWorkflowTemplate spec using workflowTemplateRef with clusterScope: true. If you pass the arguments to created Workflow, it will be merged with cluster workflow template arguments

    -

    Here is an example for ClusterWorkflowTemplate with entrypoint and arguments

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: ClusterWorkflowTemplate
    -metadata:
    -  name: cluster-workflow-template-submittable
    -spec:
    -  entrypoint: whalesay-template
    -  arguments:
    -    parameters:
    -      - name: message
    -        value: hello world
    -  templates:
    -    - name: whalesay-template
    -      inputs:
    -        parameters:
    -          - name: message
    -      container:
    -        image: docker/whalesay
    -        command: [cowsay]
    -        args: ["{{inputs.parameters.message}}"]
    -
    -

    Here is an example for creating ClusterWorkflowTemplate as Workflow with passing entrypoint and arguments to ClusterWorkflowTemplate

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: cluster-workflow-template-hello-world-
    -spec:
    -  entrypoint: whalesay-template
    -  arguments:
    -    parameters:
    -      - name: message
    -        value: "from workflow"
    -  workflowTemplateRef:
    -    name: cluster-workflow-template-submittable
    -    clusterScope: true
    -
    -

    Here is an example of a creating WorkflowTemplate as Workflow and using WorkflowTemplates's entrypoint and Workflow Arguments

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: cluster-workflow-template-hello-world-
    -spec:
    -  workflowTemplateRef:
    -    name: cluster-workflow-template-submittable
    -    clusterScope: true
    -
    -

    Managing ClusterWorkflowTemplates

    -

    CLI

    -

    You can create some example templates as follows:

    -
    argo cluster-template create https://raw.githubusercontent.com/argoproj/argo-workflows/main/examples/cluster-workflow-template/clustertemplates.yaml
    -
    -

    The submit a workflow using one of those templates:

    -
    argo submit https://raw.githubusercontent.com/argoproj/argo-workflows/main/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml
    -
    -
    -

    2.7 and after

    -

    The submit a ClusterWorkflowTemplate as a Workflow:

    -
    -
    argo submit --from clusterworkflowtemplate/cluster-workflow-template-submittable
    -
    -

    kubectl

    -

    Using kubectl apply -f and kubectl get cwft

    -

    UI

    -

    ClusterWorkflowTemplate resources can also be managed by the UI

    - - - - -

    Comments

    - - +

    Cluster Workflow Templates - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/conditional-artifacts-parameters/index.html b/conditional-artifacts-parameters/index.html index 8cb0dba207ce..a83b8a99273e 100644 --- a/conditional-artifacts-parameters/index.html +++ b/conditional-artifacts-parameters/index.html @@ -1,4019 +1,68 @@ - - - - - - - - - - - - - Conditional Artifacts and Parameters - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Conditional Artifacts and Parameters - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Conditional Artifacts and Parameters

    -
    -

    v3.1 and after

    -
    -

    You can set Step/DAG level artifacts or parameters based on an expression. -Use fromExpression under a Step/DAG level output artifact and expression under a Step/DAG level output parameter.

    -

    Conditional Artifacts

    -
    - name: coinflip
    -  steps:
    -    - - name: flip-coin
    -        template: flip-coin
    -    - - name: heads
    -        template: heads
    -        when: "{{steps.flip-coin.outputs.result}} == heads"
    -      - name: tails
    -        template: tails
    -        when: "{{steps.flip-coin.outputs.result}} == tails"
    -  outputs:
    -    artifacts:
    -      - name: result
    -        fromExpression: "steps['flip-coin'].outputs.result == 'heads' ? steps.heads.outputs.artifacts.headsresult : steps.tails.outputs.artifacts.tailsresult"
    -
    - -

    Conditional Parameters

    -
        - name: coinflip
    -      steps:
    -        - - name: flip-coin
    -            template: flip-coin
    -        - - name: heads
    -            template: heads
    -            when: "{{steps.flip-coin.outputs.result}} == heads"
    -          - name: tails
    -            template: tails
    -            when: "{{steps.flip-coin.outputs.result}} == tails"
    -      outputs:
    -        parameters:
    -          - name: stepresult
    -            valueFrom:
    -              expression: "steps['flip-coin'].outputs.result == 'heads' ? steps.heads.outputs.result : steps.tails.outputs.result"
    -
    - - - - - -

    Comments

    - - +

    Conditional Artifacts and Parameters - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/configure-archive-logs/index.html b/configure-archive-logs/index.html index e53cf68486c2..55d93bb912ee 100644 --- a/configure-archive-logs/index.html +++ b/configure-archive-logs/index.html @@ -1,4083 +1,68 @@ - - - - - - - - - - - - - Configuring Archive Logs - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Configuring Archive Logs - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Configuring Archive Logs

    -

    ⚠️ We do not recommend you rely on Argo Workflows to archive logs. Instead, use a conventional Kubernetes logging facility.

    -

    To enable automatic pipeline logging, you need to configure archiveLogs at workflow-controller config-map, workflow spec, or template level. You also need to configure Artifact Repository to define where this logging artifact is stored.

    -

    Archive logs follows priorities:

    -

    workflow-controller config (on) > workflow spec (on/off) > template (on/off)

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Controller Config MapWorkflow SpecTemplateare we archiving logs?
    truetruetruetrue
    truetruefalsetrue
    truefalsetruetrue
    truefalsefalsetrue
    falsetruetruetrue
    falsetruefalsefalse
    falsefalsetruetrue
    falsefalsefalsefalse
    -

    Configuring Workflow Controller Config Map

    -

    See Workflow Controller Config Map

    -

    Configuring Workflow Spec

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: archive-location-
    -spec:
    -  archiveLogs: true
    -  entrypoint: whalesay
    -  templates:
    -  - name: whalesay
    -    container:
    -      image: docker/whalesay:latest
    -      command: [cowsay]
    -      args: ["hello world"]
    -
    -

    Configuring Workflow Template

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: archive-location-
    -spec:
    -  entrypoint: whalesay
    -  templates:
    -  - name: whalesay
    -    container:
    -      image: docker/whalesay:latest
    -      command: [cowsay]
    -      args: ["hello world"]
    -    archiveLocation:
    -      archiveLogs: true
    -
    - - - - -

    Comments

    - - +

    Configuring Archive Logs - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/configure-artifact-repository/index.html b/configure-artifact-repository/index.html index ee89618f8320..f2958302d3d1 100644 --- a/configure-artifact-repository/index.html +++ b/configure-artifact-repository/index.html @@ -1,4742 +1,68 @@ - - - - - - - - - - - - - Configuring Your Artifact Repository - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Configuring Your Artifact Repository - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - - - - +
    +
    +
    +
    - - - - - - - - -

    Configuring Your Artifact Repository

    -

    To run Argo workflows that use artifacts, you must configure and use an artifact -repository. Argo supports any S3 compatible artifact repository such as AWS, GCS -and MinIO. This section shows how to configure the artifact repository. -Subsequent sections will show how to use it.

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameInputsOutputsGarbage CollectionUsage (Feb 2020)
    ArtifactoryYesYesNo11%
    Azure BlobYesYesYes-
    GCSYesYesYes-
    GitYesNoNo-
    HDFSYesYesNo3%
    HTTPYesYesNo2%
    OSSYesYesNo-
    RawYesNoNo5%
    S3YesYesYes86%
    -

    The actual repository used by a workflow is chosen by the following rules:

    -
      -
    1. Anything explicitly configured using Artifact Repository Ref. This is the most flexible, safe, and secure option.
    2. -
    3. From a config map named artifact-repositories if it has the workflows.argoproj.io/default-artifact-repository annotation in the workflow's namespace.
    4. -
    5. From a workflow controller config-map.
    6. -
    -

    Configuring MinIO

    -

    You can install MinIO into your cluster via Helm.

    -

    First, install helm. Then, install MinIO with the below commands:

    -
    helm repo add minio https://helm.min.io/ # official minio Helm charts
    -helm repo update
    -helm install argo-artifacts minio/minio --set service.type=LoadBalancer --set fullnameOverride=argo-artifacts
    -
    -

    Login to the MinIO UI using a web browser (port 9000) after obtaining the -external IP using kubectl.

    -
    kubectl get service argo-artifacts
    -
    -

    On Minikube:

    -
    minikube service --url argo-artifacts
    -
    -

    NOTE: When MinIO is installed via Helm, it generates -credentials, which you will use to login to the UI: -Use the commands shown below to see the credentials

    -
      -
    • AccessKey: kubectl get secret argo-artifacts -o jsonpath='{.data.accesskey}' | base64 --decode
    • -
    • SecretKey: kubectl get secret argo-artifacts -o jsonpath='{.data.secretkey}' | base64 --decode
    • -
    -

    Create a bucket named my-bucket from the MinIO UI.

    -

    If MinIO is configured to use TLS you need to set the parameter insecure to false. Additionally, if MinIO is protected by certificates generated by a custom CA, you first need to save the CA certificate in a Kubernetes secret, then set the caSecret parameter accordingly. This will allow Argo to correctly verify the server certificate presented by MinIO. For example:

    -
    kubectl create secret generic my-root-ca --from-file=my-ca.pem
    -
    -
    artifacts:
    -  - s3:
    -      insecure: false
    -      caSecret:
    -        name: my-root-ca
    -        key: my-ca.pem
    -      ...
    -
    -

    Configuring AWS S3

    -

    Create your bucket and access keys for the bucket. AWS access keys have the same -permissions as the user they are associated with. In particular, you cannot -create access keys with reduced scope. If you want to limit the permissions for -an access key, you will need to create a user with just the permissions you want -to associate with the access key. Otherwise, you can just create an access key -using your existing user account.

    -
    $ export mybucket=bucket249
    -$ cat > policy.json <<EOF
    -{
    -   "Version":"2012-10-17",
    -   "Statement":[
    -      {
    -         "Effect":"Allow",
    -         "Action":[
    -            "s3:PutObject",
    -            "s3:GetObject",
    -            "s3:DeleteObject"
    -         ],
    -         "Resource":"arn:aws:s3:::$mybucket/*"
    -      },
    -      {
    -         "Effect":"Allow",
    -         "Action":[
    -            "s3:ListBucket"
    -         ],
    -         "Resource":"arn:aws:s3:::$mybucket"
    -      }
    -   ]
    -}
    -EOF
    -$ aws s3 mb s3://$mybucket [--region xxx]
    -$ aws iam create-user --user-name $mybucket-user
    -$ aws iam put-user-policy --user-name $mybucket-user --policy-name $mybucket-policy --policy-document file://policy.json
    -$ aws iam create-access-key --user-name $mybucket-user > access-key.json
    -
    -

    If you do not have Artifact Garbage Collection configured, you should remove s3:DeleteObject from the list of Actions above.

    -

    NOTE: if you want argo to figure out which region your buckets belong in, you -must additionally set the following statement policy. Otherwise, you must -specify a bucket region in your workflow configuration.

    -
          {
    -         "Effect":"Allow",
    -         "Action":[
    -            "s3:GetBucketLocation"
    -         ],
    -         "Resource":"arn:aws:s3:::*"
    -      }
    -    ...
    -
    -

    AWS S3 IRSA

    -

    If you wish to use S3 IRSA instead of passing in an accessKey and secretKey, you need to annotate the service account of both the running workflow (in order to save logs/artifacts) and the argo-server pod (in order to retrieve the logs/artifacts).

    -
    apiVersion: v1
    -kind: ServiceAccount
    -metadata:
    -  annotations:
    -    eks.amazonaws.com/role-arn: arn:aws:iam::012345678901:role/mybucket
    -  name: myserviceaccount
    -  namespace: mynamespace
    -
    -

    Configuring GCS (Google Cloud Storage)

    -

    Create a bucket from the GCP Console -(https://console.cloud.google.com/storage/browser).

    -

    There are 2 ways to configure a Google Cloud Storage.

    -

    Through Native GCS APIs

    -
      -
    • Create and download a Google Cloud service account key.
    • -
    • Create a kubernetes secret to store the key.
    • -
    • Configure gcs artifact as following in the yaml.
    • -
    -
    artifacts:
    -  - name: message
    -    path: /tmp/message
    -    gcs:
    -      bucket: my-bucket-name
    -      key: path/in/bucket
    -      # serviceAccountKeySecret is a secret selector.
    -      # It references the k8s secret named 'my-gcs-credentials'.
    -      # This secret is expected to have have the key 'serviceAccountKey',
    -      # containing the base64 encoded credentials
    -      # to the bucket.
    -      #
    -      # If it's running on GKE and Workload Identity is used,
    -      # serviceAccountKeySecret is not needed.
    -      serviceAccountKeySecret:
    -        name: my-gcs-credentials
    -        key: serviceAccountKey
    -
    -

    If it's a GKE cluster, and Workload Identity is configured, there's no need to -create the service account key and store it as a Kubernetes secret, -serviceAccountKeySecret is also not needed in this case. Please follow the -link to configure Workload Identity -(https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity).

    -

    Use S3 APIs

    -

    Enable S3 compatible access and create an access key. Note that S3 compatible -access is on a per project rather than per bucket basis.

    - -
    artifacts:
    -  - name: my-output-artifact
    -    path: /my-output-artifact
    -    s3:
    -      endpoint: storage.googleapis.com
    -      bucket: my-gcs-bucket-name
    -      # NOTE that, by default, all output artifacts are automatically tarred and
    -      # gzipped before saving. So as a best practice, .tgz or .tar.gz
    -      # should be incorporated into the key name so the resulting file
    -      # has an accurate file extension.
    -      key: path/in/bucket/my-output-artifact.tgz
    -      accessKeySecret:
    -        name: my-gcs-s3-credentials
    -        key: accessKey
    -      secretKeySecret:
    -        name: my-gcs-s3-credentials
    -        key: secretKey
    -
    -

    Configuring Alibaba Cloud OSS (Object Storage Service)

    -

    Create your bucket and access key for the bucket. Suggest to limit the permission -for the access key, you will need to create a user with just the permissions you -want to associate with the access key. Otherwise, you can just create an access key -using your existing user account.

    -

    Setup Alibaba Cloud CLI -and follow the steps to configure the artifact storage for your workflow:

    -
    $ export mybucket=bucket-workflow-artifect
    -$ export myregion=cn-zhangjiakou
    -$ # limit permission to read/write the bucket.
    -$ cat > policy.json <<EOF
    -{
    -    "Version": "1",
    -    "Statement": [
    -        {
    -            "Effect": "Allow",
    -            "Action": [
    -              "oss:PutObject",
    -              "oss:GetObject"
    -            ],
    -            "Resource": "acs:oss:*:*:$mybucket/*"
    -        }
    -    ]
    -}
    -EOF
    -$ # create bucket.
    -$ aliyun oss mb oss://$mybucket --region $myregion
    -$ # show endpoint of bucket.
    -$ aliyun oss stat oss://$mybucket
    -$ #create a ram user to access bucket.
    -$ aliyun ram CreateUser --UserName $mybucket-user
    -$ # create ram policy with the limit permission.
    -$ aliyun ram CreatePolicy --PolicyName $mybucket-policy --PolicyDocument "$(cat policy.json)"
    -$ # attch ram policy to the ram user.
    -$ aliyun ram AttachPolicyToUser --UserName $mybucket-user --PolicyName $mybucket-policy --PolicyType Custom
    -$ # create access key and secret key for the ram user.
    -$ aliyun ram CreateAccessKey --UserName $mybucket-user > access-key.json
    -$ # create secret in demo namespace, replace demo with your namespace.
    -$ kubectl create secret generic $mybucket-credentials -n demo\
    -  --from-literal "accessKey=$(cat access-key.json | jq -r .AccessKey.AccessKeyId)" \
    -  --from-literal "secretKey=$(cat access-key.json | jq -r .AccessKey.AccessKeySecret)"
    -$ # create configmap to config default artifact for a namespace.
    -$ cat > default-artifact-repository.yaml << EOF
    -apiVersion: v1
    -kind: ConfigMap
    -metadata:
    -  # If you want to use this config map by default, name it "artifact-repositories". Otherwise, you can provide a reference to a
    -  # different config map in `artifactRepositoryRef.configMap`.
    -  name: artifact-repositories
    -  annotations:
    -    # v3.0 and after - if you want to use a specific key, put that key into this annotation.
    -    workflows.argoproj.io/default-artifact-repository: default-oss-artifact-repository
    -data:
    -  default-oss-artifact-repository: |
    -    oss:
    -      endpoint: http://oss-cn-zhangjiakou-internal.aliyuncs.com
    -      bucket: $mybucket
    -      # accessKeySecret and secretKeySecret are secret selectors.
    -      # It references the k8s secret named 'bucket-workflow-artifect-credentials'.
    -      # This secret is expected to have have the keys 'accessKey'
    -      # and 'secretKey', containing the base64 encoded credentials
    -      # to the bucket.
    -      accessKeySecret:
    -        name: $mybucket-credentials
    -        key: accessKey
    -      secretKeySecret:
    -        name: $mybucket-credentials
    -        key: secretKey
    -EOF
    -# create cm in demo namespace, replace demo with your namespace.
    -$ k apply -f default-artifact-repository.yaml -n demo
    -
    -

    You can also set createBucketIfNotPresent to true to tell the artifact driver to automatically create the OSS bucket if it doesn't exist yet when saving artifacts. Note that you'll need to set additional permission for your OSS account to create new buckets.

    -

    Alibaba Cloud OSS RRSA

    -

    If you wish to use OSS RRSA instead of passing in an accessKey and secretKey, you need to perform the following actions:

    -
      -
    • Install pod-identity-webhook in your cluster to automatically inject the OIDC tokens and environment variables.
    • -
    • Add the label pod-identity.alibabacloud.com/injection: 'on' to the target workflow namespace.
    • -
    • Add the annotation pod-identity.alibabacloud.com/role-name: $your_ram_role_name to the service account of running workflow.
    • -
    • Set useSDKCreds: true in your target artifact repository cm and remove the secret references to AK/SK.
    • -
    -
    apiVersion: v1
    -kind: Namespace
    -metadata:
    -  name: my-ns
    -  labels:
    -    pod-identity.alibabacloud.com/injection: 'on'
    -
    ----
    -apiVersion: v1
    -kind: ServiceAccount
    -metadata:
    -  name: my-sa
    -  namespace: rrsa-demo
    -  annotations:
    -    pod-identity.alibabacloud.com/role-name: $your_ram_role_name
    -
    ----
    -apiVersion: v1
    -kind: ConfigMap
    -metadata:
    -  # If you want to use this config map by default, name it "artifact-repositories". Otherwise, you can provide a reference to a
    -  # different config map in `artifactRepositoryRef.configMap`.
    -  name: artifact-repositories
    -  annotations:
    -    # v3.0 and after - if you want to use a specific key, put that key into this annotation.
    -    workflows.argoproj.io/default-artifact-repository: default-oss-artifact-repository
    -data:
    -  default-oss-artifact-repository: |
    -    oss:
    -      endpoint: http://oss-cn-zhangjiakou-internal.aliyuncs.com
    -      bucket: $mybucket
    -      useSDKCreds: true
    -
    -

    Configuring Azure Blob Storage

    -

    Create an Azure Storage account and a container within that account. There are a number of -ways to accomplish this, including the Azure Portal or the -CLI.

    -
      -
    1. Retrieve the blob service endpoint for the storage account. For example:
    2. -
    -
    az storage account show -n mystorageaccountname --query 'primaryEndpoints.blob' -otsv
    -
    -
      -
    1. Retrieve the access key for the storage account. For example:
    2. -
    -
    az storage account keys list -n mystorageaccountname --query '[0].value' -otsv
    -
    -
      -
    1. Create a kubernetes secret to hold the storage account key. For example:
    2. -
    -
    kubectl create secret generic my-azure-storage-credentials \
    -  --from-literal "account-access-key=$(az storage account keys list -n mystorageaccountname --query '[0].value' -otsv)"
    -
    -
      -
    1. Configure azure artifact as following in the yaml.
    2. -
    -
    artifacts:
    -  - name: message
    -    path: /tmp/message
    -    azure:
    -      endpoint: https://mystorageaccountname.blob.core.windows.net
    -      container: my-container-name
    -      blob: path/in/container
    -      # accountKeySecret is a secret selector.
    -      # It references the k8s secret named 'my-azure-storage-credentials'.
    -      # This secret is expected to have have the key 'account-access-key',
    -      # containing the base64 encoded credentials to the storage account.
    -      #
    -      # If a managed identity has been assigned to the machines running the
    -      # workflow (e.g., https://docs.microsoft.com/en-us/azure/aks/use-managed-identity)
    -      # then accountKeySecret is not needed, and useSDKCreds should be
    -      # set to true instead:
    -      # useSDKCreds: true
    -      accountKeySecret:
    -        name: my-azure-storage-credentials
    -        key: account-access-key     
    -
    -

    If useSDKCreds is set to true, then the accountKeySecret value is not -used and authentication with Azure will be attempted using a -DefaultAzureCredential -instead.

    -

    Configure the Default Artifact Repository

    -

    In order for Argo to use your artifact repository, you can configure it as the -default repository. Edit the workflow-controller config map with the correct -endpoint and access/secret keys for your repository.

    -

    S3 compatible artifact repository bucket (such as AWS, GCS, MinIO, and Alibaba Cloud OSS)

    -

    Use the endpoint corresponding to your provider:

    -
      -
    • AWS: s3.amazonaws.com
    • -
    • GCS: storage.googleapis.com
    • -
    • MinIO: my-minio-endpoint.default:9000
    • -
    • Alibaba Cloud OSS: oss-cn-hangzhou-zmf.aliyuncs.com
    • -
    -

    The key is name of the object in the bucket The accessKeySecret and -secretKeySecret are secret selectors that reference the specified kubernetes -secret. The secret is expected to have the keys accessKey and secretKey, -containing the base64 encoded credentials to the bucket.

    -

    For AWS, the accessKeySecret and secretKeySecret correspond to -AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY respectively.

    -

    EC2 provides a meta-data API via which applications using the AWS SDK may assume -IAM roles associated with the instance. If you are running argo on EC2 and the -instance role allows access to your S3 bucket, you can configure the workflow -step pods to assume the role. To do so, simply omit the accessKeySecret and -secretKeySecret fields.

    -

    For GCS, the accessKeySecret and secretKeySecret for S3 compatible access -can be obtained from the GCP Console. Note that S3 compatible access is on a per -project rather than per bucket basis.

    - -

    For MinIO, the accessKeySecret and secretKeySecret naturally correspond the -AccessKey and SecretKey.

    -

    For Alibaba Cloud OSS, the accessKeySecret and secretKeySecret corresponds to -accessKeyID and accessKeySecret respectively.

    -

    Example:

    -
    $ kubectl edit configmap workflow-controller-configmap -n argo  # assumes argo was installed in the argo namespace
    -...
    -data:
    -  artifactRepository: |
    -    s3:
    -      bucket: my-bucket
    -      keyFormat: prefix/in/bucket     #optional
    -      endpoint: my-minio-endpoint.default:9000        #AWS => s3.amazonaws.com; GCS => storage.googleapis.com
    -      insecure: true                  #omit for S3/GCS. Needed when minio runs without TLS
    -      accessKeySecret:                #omit if accessing via AWS IAM
    -        name: my-minio-cred
    -        key: accessKey
    -      secretKeySecret:                #omit if accessing via AWS IAM
    -        name: my-minio-cred
    -        key: secretKey
    -      useSDKCreds: true               #tells argo to use AWS SDK's default provider chain, enable for things like IRSA support
    -
    -

    The secrets are retrieved from the namespace you use to run your workflows. Note -that you can specify a keyFormat.

    -

    Google Cloud Storage (GCS)

    -

    Argo also can use native GCS APIs to access a Google Cloud Storage bucket.

    -

    serviceAccountKeySecret references to a Kubernetes secret which stores a Google Cloud -service account key to access the bucket.

    -

    Example:

    -
    $ kubectl edit configmap workflow-controller-configmap -n argo  # assumes argo was installed in the argo namespace
    -...
    -data:
    -  artifactRepository: |
    -    gcs:
    -      bucket: my-bucket
    -      keyFormat: prefix/in/bucket/{{workflow.name}}/{{pod.name}}     #it should reference workflow variables, such as "{{workflow.name}}/{{pod.name}}"
    -      serviceAccountKeySecret:
    -        name: my-gcs-credentials
    -        key: serviceAccountKey
    -
    -

    Azure Blob Storage

    -

    Argo can use native Azure APIs to access a Azure Blob Storage container.

    -

    accountKeySecret references to a Kubernetes secret which stores an Azure Blob -Storage account shared key to access the container.

    -

    Example:

    -
    $ kubectl edit configmap workflow-controller-configmap -n argo  # assumes argo was installed in the argo namespace
    -...
    -data:
    -  artifactRepository: |
    -    azure:
    -      container: my-container
    -      blobNameFormat: prefix/in/container     #optional, it could reference workflow variables, such as "{{workflow.name}}/{{pod.name}}"
    -      accountKeySecret:
    -        name: my-azure-storage-credentials
    -        key: account-access-key
    -
    -

    Accessing Non-Default Artifact Repositories

    -

    This section shows how to access artifacts from non-default artifact -repositories.

    -

    The endpoint, accessKeySecret and secretKeySecret are the same as for -configuring the default artifact repository described previously.

    -
      templates:
    -  - name: artifact-example
    -    inputs:
    -      artifacts:
    -      - name: my-input-artifact
    -        path: /my-input-artifact
    -        s3:
    -          endpoint: s3.amazonaws.com
    -          bucket: my-aws-bucket-name
    -          key: path/in/bucket/my-input-artifact.tgz
    -          accessKeySecret:
    -            name: my-aws-s3-credentials
    -            key: accessKey
    -          secretKeySecret:
    -            name: my-aws-s3-credentials
    -            key: secretKey
    -    outputs:
    -      artifacts:
    -      - name: my-output-artifact
    -        path: /my-output-artifact
    -        s3:
    -          endpoint: storage.googleapis.com
    -          bucket: my-gcs-bucket-name
    -          # NOTE that, by default, all output artifacts are automatically tarred and
    -          # gzipped before saving. So as a best practice, .tgz or .tar.gz
    -          # should be incorporated into the key name so the resulting file
    -          # has an accurate file extension.
    -          key: path/in/bucket/my-output-artifact.tgz
    -          accessKeySecret:
    -            name: my-gcs-s3-credentials
    -            key: accessKey
    -          secretKeySecret:
    -            name: my-gcs-s3-credentials
    -            key: secretKey
    -          region: my-GCS-storage-bucket-region
    -    container:
    -      image: debian:latest
    -      command: [sh, -c]
    -      args: ["cp -r /my-input-artifact /my-output-artifact"]
    -
    -

    Artifact Streaming

    -

    With artifact streaming, artifacts don’t need to be saved to disk first. Artifact streaming is only supported in the following -artifact drivers: S3 (v3.4+), Azure Blob (v3.4+), HTTP (v3.5+), and Artifactory (v3.5+).

    -

    Previously, when a user would click the button to download an artifact in the UI, the artifact would need to be written to the -Argo Server’s disk first before downloading. If many users tried to download simultaneously, they would take up -disk space and fail the download.

    - - - - -

    Comments

    - - +

    Configuring Your Artifact Repository - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/container-set-template/index.html b/container-set-template/index.html index e2107442955a..85fc998df93a 100644 --- a/container-set-template/index.html +++ b/container-set-template/index.html @@ -1,4071 +1,68 @@ - - - - - - - - - - - - - Container Set Template - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Container Set Template - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Container Set Template

    -
    -

    v3.1 and after

    -
    -

    A container set templates is similar to a normal container or script template, but allows you to specify multiple -containers to run within a single pod.

    -

    Because you have multiple containers within a pod, they will be scheduled on the same host. You can use cheap and fast -empty-dir volumes instead of persistent volume claims to share data between steps.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: container-set-template-
    -spec:
    -  entrypoint: main
    -  templates:
    -    - name: main
    -      volumes:
    -        - name: workspace
    -          emptyDir: { }
    -      containerSet:
    -        volumeMounts:
    -          - mountPath: /workspace
    -            name: workspace
    -        containers:
    -          - name: a
    -            image: argoproj/argosay:v2
    -            command: [sh, -c]
    -            args: ["echo 'a: hello world' >> /workspace/message"]
    -          - name: b
    -            image: argoproj/argosay:v2
    -            command: [sh, -c]
    -            args: ["echo 'b: hello world' >> /workspace/message"]
    -          - name: main
    -            image: argoproj/argosay:v2
    -            command: [sh, -c]
    -            args: ["echo 'main: hello world' >> /workspace/message"]
    -            dependencies:
    -              - a
    -              - b
    -      outputs:
    -        parameters:
    -          - name: message
    -            valueFrom:
    -              path: /workspace/message
    -
    -

    There are a couple of caveats:

    -
      -
    1. You must use the Emissary Executor.
    2. -
    3. Or all containers must run in parallel - i.e. it is a graph with no dependencies.
    4. -
    5. You cannot use enhanced depends logic.
    6. -
    7. It will use the sum total of all resource requests, maybe costing more than the same DAG template. This will be a problem if your requests already cost a lot. See below.
    8. -
    -

    The containers can be arranged as a graph by specifying dependencies. This is suitable for running 10s rather than 100s -of containers.

    -

    Inputs and Outputs

    -

    As with the container and script templates, inputs and outputs can only be loaded and saved from a container -named main.

    -

    All container set templates that have artifacts must/should have a container named main.

    -

    If you want to use base-layer artifacts, main must be last to finish, so it must be the root node in the graph.

    -

    That is may not be practical.

    -

    Instead, have a workspace volume and make sure all artifacts paths are on that volume.

    -

    ⚠️ Resource Requests

    -

    A container set actually starts all containers, and the Emissary only starts the main container process when the containers it depends on have completed. This mean that even though the container is doing no useful work, it is still consuming resources and you're still getting billed for them.

    -

    If your requests are small, this won't be a problem.

    -

    If your requests are large, set the resource requests so the sum total is the most you'll need at once.

    -

    Example A: a simple sequence e.g. a -> b -> c

    -
      -
    • a needs 1Gi memory
    • -
    • b needs 2Gi memory
    • -
    • c needs 1Gi memory
    • -
    -

    Then you know you need only a maximum of 2Gi. You could set as follows:

    -
      -
    • a requests 512Mi memory
    • -
    • b requests 1Gi memory
    • -
    • c requests 512Mi memory
    • -
    -

    The total is 2Gi, which is enough for b. We're all good.

    -

    Example B: Diamond DAG e.g. a diamond a -> b -> d and a -> c -> d, i.e. b and c run at the same time.

    -
      -
    • a needs 1000 cpu
    • -
    • b needs 2000 cpu
    • -
    • c needs 1000 cpu
    • -
    • d needs 1000 cpu
    • -
    -

    I know that b and c will run at the same time. So I need to make sure the total is 3000.

    -
      -
    • a requests 500 cpu
    • -
    • b requests 1000 cpu
    • -
    • c requests 1000 cpu
    • -
    • d requests 500 cpu
    • -
    -

    The total is 3000, which is enough for b + c. We're all good.

    -

    Example B: Lopsided requests, e.g. a -> b where a is cheap and b is expensive

    -
      -
    • a needs 100 cpu, 1Mi memory, runs for 10h
    • -
    • b needs 8Ki GPU, 100 Gi memory, 200 Ki GPU, runs for 5m
    • -
    -

    Can you see the problem here? a only has small requests, but the container set will use the total of all requests. So it's as if you're using all that GPU for 10h. This will be expensive.

    -

    Solution: do not use container set when you have lopsided requests.

    - - - - -

    Comments

    - - +

    Container Set Template - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cost-optimisation/index.html b/cost-optimisation/index.html index 6b492ac7b6a9..7191c4c9e570 100644 --- a/cost-optimisation/index.html +++ b/cost-optimisation/index.html @@ -1,4185 +1,68 @@ - - - - - - - - - - - - - Cost Optimization - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Cost Optimization - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - - - - +
    +
    +
    +
    - - - - - - - - -

    Cost Optimization

    -

    User Cost Optimizations

    -

    Suggestions for users running workflows.

    -

    Set The Workflows Pod Resource Requests

    -
    -

    Suitable if you are running a workflow with many homogeneous pods.

    -
    -

    Resource duration shows the amount of CPU and memory requested by a pod and is indicative of the cost. You can use this to find costly steps within your workflow.

    -

    Smaller requests can be set in the pod spec patch's resource requirements.

    -

    Use A Node Selector To Use Cheaper Instances

    -

    You can use a node selector for cheaper instances, e.g. spot instances:

    -
    nodeSelector:
    -  "node-role.kubernetes.io/argo-spot-worker": "true"
    -
    -

    Consider trying Volume Claim Templates or Volumes instead of Artifacts

    -
    -

    Suitable if you have a workflow that passes a lot of artifacts within itself.

    -
    -

    Copying artifacts to and from storage outside of a cluster can be expensive. The correct choice is dependent on what your artifact storage provider is vs. what volume they are using. For example, we believe it may be more expensive to allocate and delete a new block storage volume (AWS EBS, GCP persistent disk) every workflow using the PVC feature, than it is to upload and download some small files to object storage (AWS S3, GCP cloud storage).

    -

    On the other hand if you are using a NFS volume shared between all your workflows with large artifacts, that might be cheaper than the data transfer and storage costs of object storage.

    -

    Consider:

    -
      -
    • Data transfer costs (upload/download vs. copying)
    • -
    • Data storage costs (object storage vs. volume)
    • -
    • Requirement for parallel access to data (NFS vs. block storage vs. artifact)
    • -
    -

    When using volume claims, consider configuring Volume Claim GC. By default, claims are only deleted when a workflow is successful.

    -

    Limit The Total Number Of Workflows And Pods

    -
    -

    Suitable for all.

    -
    -

    A workflow (and for that matter, any Kubernetes resource) will incur a cost as long as it exists in your cluster, even after it's no longer running.

    -

    The workflow controller memory and CPU needs to increase linearly with the number of pods and workflows you are currently running.

    -

    You should delete workflows once they are no longer needed. -You can enable the Workflow Archive to continue viewing them after they are removed from Kubernetes.

    -

    Limit the total number of workflows using:

    -
      -
    • Active Deadline Seconds - terminate running workflows that do not complete in a set time. This will make sure workflows do not run forever.
    • -
    • Workflow TTL Strategy - delete completed workflows after a set time.
    • -
    • Pod GC - delete completed pods. By default, Pods are not deleted.
    • -
    • CronWorkflow history limits - delete successful or failed workflows which exceed the limit.
    • -
    -

    Example

    -
    spec:
    -  # must complete in 8h (28,800 seconds)
    -  activeDeadlineSeconds: 28800
    -  # keep workflows for 1d (86,400 seconds)
    -  ttlStrategy:
    -    secondsAfterCompletion: 86400
    -  # delete all pods as soon as they complete
    -  podGC:
    -    strategy: OnPodCompletion
    -
    -

    You can set these configurations globally using Default Workflow Spec.

    -

    Changing these settings will not delete workflows that have already run. To list old workflows:

    -
    argo list --completed --since 7d
    -
    -
    -

    v2.9 and after

    -
    -

    To list/delete workflows completed over 7 days ago:

    -
    argo list --older 7d
    -argo delete --older 7d
    -
    -

    Operator Cost Optimizations

    -

    Suggestions for operators who installed Argo Workflows.

    -

    Set Resources Requests and Limits

    -
    -

    Suitable if you have many instances, e.g. on dozens of clusters or namespaces.

    -
    -

    Set resource requests and limits for the workflow-controller and argo-server, e.g.

    -
    requests:
    -  cpu: 100m
    -  memory: 64Mi
    -limits:
    -  cpu: 500m
    -  memory: 128Mi
    -
    -

    This above limit is suitable for the Argo Server, as this is stateless. The Workflow Controller is stateful and will scale to the number of live workflows - so you are likely to need higher values.

    -

    Configure Executor Resource Requests

    -
    -

    Suitable for all - unless you have large artifacts.

    -
    -

    Configure workflow-controller-configmap.yaml to set the executor.resources:

    -
    executor: |
    -  resources:
    -    requests:
    -      cpu: 100m
    -      memory: 64Mi
    -    limits:
    -      cpu: 500m
    -      memory: 512Mi
    -
    -

    The correct values depend on the size of artifacts your workflows download. For artifacts > 10GB, memory usage may be large - #1322.

    - - - - -

    Comments

    - - +

    Cost Optimization - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cron-backfill/index.html b/cron-backfill/index.html index 54320937d8e2..07926e57b099 100644 --- a/cron-backfill/index.html +++ b/cron-backfill/index.html @@ -1,3989 +1,68 @@ - - - - - - - - - - - - - Cron Backfill - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Cron Backfill - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Cron Backfill

    -

    Use Case

    -
      -
    • You are using cron workflows to run daily jobs, you may need to re-run for a date, or run some historical days.
    • -
    -

    Solution

    -
      -
    1. Create a workflow template for your daily job.
    2. -
    3. Create your cron workflow to run daily and invoke that template.
    4. -
    5. Create a backfill workflow that uses withSequence to run the job for each date.
    6. -
    -

    This full example contains:

    -
      -
    • A workflow template named job.
    • -
    • A cron workflow named daily-job.
    • -
    • A workflow named backfill-v1 that uses a resource template to create one workflow for each backfill date.
    • -
    • A alternative workflow named backfill-v2 that uses a steps templates to run one task for each backfill date.
    • -
    - - - - -

    Comments

    - - +

    Cron Backfill - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/cron-workflows/index.html b/cron-workflows/index.html index a422dcd66356..f9e92aa63315 100644 --- a/cron-workflows/index.html +++ b/cron-workflows/index.html @@ -1,4413 +1,68 @@ - - - - - - - - - - - - - Cron Workflows - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Cron Workflows - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Cron Workflows

    -
    -

    v2.5 and after

    -
    -

    Introduction

    -

    CronWorkflow are workflows that run on a preset schedule. They are designed to be converted from Workflow easily and to mimic the same options as Kubernetes CronJob. In essence, CronWorkflow = Workflow + some specific cron options.

    -

    CronWorkflow Spec

    -

    An example CronWorkflow spec would look like:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: CronWorkflow
    -metadata:
    -  name: test-cron-wf
    -spec:
    -  schedule: "* * * * *"
    -  concurrencyPolicy: "Replace"
    -  startingDeadlineSeconds: 0
    -  workflowSpec:
    -    entrypoint: whalesay
    -    templates:
    -    - name: whalesay
    -      container:
    -        image: alpine:3.6
    -        command: [sh, -c]
    -        args: ["date; sleep 90"]
    -
    -

    workflowSpec and workflowMetadata

    -

    CronWorkflow.spec.workflowSpec is the same type as Workflow.spec and serves as a template for Workflow objects that are created from it. Everything under this spec will be converted to a Workflow.

    -

    The resulting Workflow name will be a generated name based on the CronWorkflow name. In this example it could be something like test-cron-wf-tj6fe.

    -

    CronWorkflow.spec.workflowMetadata can be used to add labels and annotations.

    -

    CronWorkflow Options

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Option NameDefault ValueDescription
    scheduleNone, must be providedSchedule at which the Workflow will be run. E.g. 5 4 * * *
    timezoneMachine timezoneTimezone during which the Workflow will be run from the IANA timezone standard, e.g. America/Los_Angeles
    suspendfalseIf true Workflow scheduling will not occur. Can be set from the CLI, GitOps, or directly
    concurrencyPolicyAllowPolicy that determines what to do if multiple Workflows are scheduled at the same time. Available options: Allow: allow all, Replace: remove all old before scheduling a new, Forbid: do not allow any new while there are old
    startingDeadlineSeconds0Number of seconds after the last successful run during which a missed Workflow will be run
    successfulJobsHistoryLimit3Number of successful Workflows that will be persisted at a time
    failedJobsHistoryLimit1Number of failed Workflows that will be persisted at a time
    -

    Cron Schedule Syntax

    -

    The cron scheduler uses the standard cron syntax, as documented on Wikipedia.

    -

    More detailed documentation for the specific library used is documented here.

    -

    Crash Recovery

    -

    If the workflow-controller crashes (and hence the CronWorkflow controller), there are some options you can set to ensure that CronWorkflows that would have been scheduled while the controller was down can still run. Mainly startingDeadlineSeconds can be set to specify the maximum number of seconds past the last successful run of a CronWorkflow during which a missed run will still be executed.

    -

    For example, if a CronWorkflow that runs every minute is last run at 12:05:00, and the controller crashes between 12:05:55 and 12:06:05, then the expected execution time of 12:06:00 would be missed. However, if startingDeadlineSeconds is set to a value greater than 65 (the amount of time passing between the last scheduled run time of 12:05:00 and the current controller restart time of 12:06:05), then a single instance of the CronWorkflow will be executed exactly at 12:06:05.

    -

    Currently only a single instance will be executed as a result of setting startingDeadlineSeconds.

    -

    This setting can also be configured in tandem with concurrencyPolicy to achieve more fine-tuned control.

    -

    Daylight Saving

    -

    Daylight Saving (DST) is taken into account when using timezone. This means that, depending on the local time of the scheduled job, argo will schedule the workflow once, twice, or not at all when the clock moves forward or back.

    -

    For example, with timezone set at America/Los_Angeles, we have daylight saving

    -
      -
    • -

      +1 hour (DST start) at 2020-03-08 02:00:00:

      -

      Note: The schedules between 02:00 a.m. to 02:59 a.m. were skipped on Mar 8th due to the clock being moved forward:

      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      cronsequenceworkflow execution time
      59 1 ** *12020-03-08 01:59:00 -0800 PST
      22020-03-09 01:59:00 -0700 PDT
      32020-03-10 01:59:00 -0700 PDT
      0 2 ** *12020-03-09 02:00:00 -0700 PDT
      22020-03-10 02:00:00 -0700 PDT
      32020-03-11 02:00:00 -0700 PDT
      1 2 ** *12020-03-09 02:01:00 -0700 PDT
      22020-03-10 02:01:00 -0700 PDT
      32020-03-11 02:01:00 -0700 PDT
      -
    • -
    • -

      -1 hour (DST end) at 2020-11-01 02:00:00:

      -

      Note: the schedules between 01:00 a.m. to 01:59 a.m. were triggered twice on Nov 1st due to the clock being set back:

      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      cronsequenceworkflow execution time
      59 1 ** *12020-11-01 01:59:00 -0700 PDT
      22020-11-01 01:59:00 -0800 PST
      32020-11-02 01:59:00 -0800 PST
      0 2 ** *12020-11-01 02:00:00 -0800 PST
      22020-11-02 02:00:00 -0800 PST
      32020-11-03 02:00:00 -0800 PST
      1 2 ** *12020-11-01 02:01:00 -0800 PST
      22020-11-02 02:01:00 -0800 PST
      32020-11-03 02:01:00 -0800 PST
      -
    • -
    -

    Managing CronWorkflow

    -

    CLI

    -

    CronWorkflow can be created from the CLI by using basic commands:

    -
    $ argo cron create cron.yaml
    -Name:                          test-cron-wf
    -Namespace:                     argo
    -Created:                       Mon Nov 18 10:17:06 -0800 (now)
    -Schedule:                      * * * * *
    -Suspended:                     false
    -StartingDeadlineSeconds:       0
    -ConcurrencyPolicy:             Forbid
    -
    -$ argo cron list
    -NAME           AGE   LAST RUN   SCHEDULE    SUSPENDED
    -test-cron-wf   49s   N/A        * * * * *   false
    -
    -# some time passes
    -
    -$ argo cron list
    -NAME           AGE   LAST RUN   SCHEDULE    SUSPENDED
    -test-cron-wf   56s   2s         * * * * *   false
    -
    -$ argo cron get test-cron-wf
    -Name:                          test-cron-wf
    -Namespace:                     argo
    -Created:                       Wed Oct 28 07:19:02 -0600 (23 hours ago)
    -Schedule:                      * * * * *
    -Suspended:                     false
    -StartingDeadlineSeconds:       0
    -ConcurrencyPolicy:             Replace
    -LastScheduledTime:             Thu Oct 29 06:51:00 -0600 (11 minutes ago)
    -NextScheduledTime:             Thu Oct 29 13:03:00 +0000 (32 seconds from now)
    -Active Workflows:              test-cron-wf-rt4nf
    -
    -

    Note: NextScheduledRun assumes that the workflow-controller uses UTC as its timezone

    -

    kubectl

    -

    Using kubectl apply -f and kubectl get cwf

    -

    Back-Filling Days

    -

    See cron backfill.

    -

    GitOps via Argo CD

    -

    CronWorkflow resources can be managed with GitOps by using Argo CD

    -

    UI

    -

    CronWorkflow resources can also be managed by the UI

    - - - - -

    Comments

    - - +

    Cron Workflows - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/data-sourcing-and-transformation/index.html b/data-sourcing-and-transformation/index.html index 1f892af6d18e..dfe05936bc47 100644 --- a/data-sourcing-and-transformation/index.html +++ b/data-sourcing-and-transformation/index.html @@ -1,4019 +1,68 @@ - - - - - - - - - - - - - Data Sourcing and Transformations - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Data Sourcing and Transformations - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Data Sourcing and Transformations

    -
    -

    v3.1 and after

    -
    -

    We have intentionally made this feature available with only bare-bones functionality. Our hope is that we are able to build this feature with our community's feedback. If you have ideas and use cases for this feature, please open an enhancement proposal on GitHub.

    -

    Additionally, please take a look at our current ideas at the bottom of this document.

    -

    Introduction

    -

    Users often source and transform data as part of their workflows. The data template provides first-class support for these common operations.

    -

    data templates can best be understood by looking at a common data sourcing and transformation operation in bash:

    -
    find -r . | grep ".pdf" | sed "s/foo/foo.ready/"
    -
    -

    Such operations consist of two main parts:

    -
      -
    • A "source" of data: find -r .
    • -
    • A series of "transformations" which transform the output of the source serially: | grep ".pdf" | sed "s/foo/foo.ready/"
    • -
    -

    This operation, for example, could be useful in sourcing a potential list of files to be processed and filtering and manipulating the list as desired.

    -

    In Argo, this operation would be written as:

    -
    - name: generate-artifacts
    -  data:
    -    source:             # Define a source for the data, only a single "source" is permitted
    -      artifactPaths:    # A predefined source: Generate a list of all artifact paths in a given repository
    -        s3:             # Source from an S3 bucket
    -          bucket: test
    -          endpoint: minio:9000
    -          insecure: true
    -          accessKeySecret:
    -            name: my-minio-cred
    -            key: accesskey
    -          secretKeySecret:
    -            name: my-minio-cred
    -            key: secretkey
    -    transformation:     # The source is then passed to be transformed by transformations defined here
    -      - expression: "filter(data, {# endsWith \".pdf\"})"
    -      - expression: "map(data, {# + \".ready\"})"
    -
    -

    Spec

    -

    A data template must always contain a source. Current available sources:

    -
      -
    • artifactPaths: generates a list of artifact paths from the artifact repository specified
    • -
    -

    A data template may contain any number of transformations (or zero). The transformations will be applied serially in order. Current available transformations:

    -
      -
    • -

      expression: an expr expression. See language definition here. When defining expr expressions Argo will pass the available data to the environment as a variable called data (see example above).

      -

      We understand that the expression transformation is limited. We intend to greatly expand the functionality of this template with our community's feedback. Please see the link at the top of this document to submit ideas or use cases for this feature.

      -
    • -
    - - - - -

    Comments

    - - +

    Data Sourcing and Transformations - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/debug-pause/index.html b/debug-pause/index.html index bc6c6836bac8..3a691401832b 100644 --- a/debug-pause/index.html +++ b/debug-pause/index.html @@ -1,4024 +1,68 @@ - - - - - - - - - - - - - Debug Pause - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Debug Pause - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Debug Pause

    -
    -

    v3.3 and after

    -
    -

    Introduction

    -

    The debug pause feature makes it possible to pause individual workflow steps for debugging before, after or both and then release the steps from the paused state. Currently this feature is only supported when using the Emissary Executor

    -

    In order to pause a container env variables are used:

    -
      -
    • ARGO_DEBUG_PAUSE_AFTER - to pause a step after execution
    • -
    • ARGO_DEBUG_PAUSE_BEFORE - to pause a step before execution
    • -
    -

    Example workflow:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: pause-after-
    -spec:
    -  entrypoint: whalesay
    -  templates:
    -    - name: whalesay
    -      container:
    -        image: argoproj/argosay:v2
    -        env:
    -          - name: ARGO_DEBUG_PAUSE_AFTER
    -            value: 'true'
    -
    -

    In order to release a step from a pause state, marker files are used named /var/run/argo/ctr/main/after or /var/run/argo/ctr/main/before corresponding to when the step is paused. Pausing steps can be used together with ephemeral containers when a shell is not available in the used container.

    -

    Example

    -

    1) Create a workflow where the debug pause env in set, in this example ARGO_DEBUG_PAUSE_AFTER will be set and thus the step will be paused after execution of the user code.

    -

    pause-after.yaml

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: pause-after-
    -spec:
    -  entrypoint: whalesay
    -  templates:
    -    - name: whalesay
    -      container:
    -        image: argoproj/argosay:v2
    -        env:
    -          - name: ARGO_DEBUG_PAUSE_AFTER
    -            value: 'true'
    -
    -
    argo submit -n argo --watch pause-after.yaml
    -
    -

    Create a shell in the container of interest of create a ephemeral container in the pod, in this example ephemeral containers are used.

    -
    kubectl debug -n argo -it POD_NAME --image=busybox --target=main --share-processes
    -
    -

    In order to have access to the persistence volume used by the workflow step, --share-processes will have to be used.

    -

    The ephemeral container can be used to perform debugging operations. When debugging has been completed, create the marker file to allow the workflow step to continue. When using process name space sharing container file systems are visible to other containers in the pod through the /proc/$pid/root link.

    -
    touch /proc/1/root/run/argo/ctr/main/after
    -
    - - - - -

    Comments

    - - +

    Debug Pause - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/default-workflow-specs/index.html b/default-workflow-specs/index.html index 2e996e8b631e..b333f8096138 100644 --- a/default-workflow-specs/index.html +++ b/default-workflow-specs/index.html @@ -1,4014 +1,68 @@ - - - - - - - - - - - - - Default Workflow Spec - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Default Workflow Spec - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Default Workflow Spec

    -
    -

    v2.7 and after

    -
    -

    Introduction

    -

    Default Workflow spec values can be set at the controller config map that will apply to all Workflows executed from said controller. -If a Workflow has a value that also has a default value set in the config map, the Workflow's value will take precedence.

    -

    Setting Default Workflow Values

    -

    Default Workflow values can be specified by adding them under the workflowDefaults key in the workflow-controller-configmap. -Values can be added as they would under the Workflow.spec tag.

    -

    For example, to specify default values that would partially produce the following Workflow:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: gc-ttl-
    -  annotations:
    -    argo: workflows
    -  labels:
    -    foo: bar
    -spec:
    -  ttlStrategy:
    -    secondsAfterSuccess: 5     # Time to live after workflow is successful
    -  parallelism: 3
    -
    -

    The following would be specified in the Config Map:

    -
    # This file describes the config settings available in the workflow controller configmap
    -apiVersion: v1
    -kind: ConfigMap
    -metadata:
    -  name: workflow-controller-configmap
    -data:
    -  # Default values that will apply to all Workflows from this controller, unless overridden on the Workflow-level
    -  workflowDefaults: |
    -    metadata:
    -      annotations:
    -        argo: workflows
    -      labels:
    -        foo: bar
    -    spec:
    -      ttlStrategy:
    -        secondsAfterSuccess: 5
    -      parallelism: 3
    -
    - - - - -

    Comments

    - - +

    Default Workflow Spec - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/disaster-recovery/index.html b/disaster-recovery/index.html index 6f940a354306..6ff16fe37f17 100644 --- a/disaster-recovery/index.html +++ b/disaster-recovery/index.html @@ -1,3920 +1,68 @@ - - - - - - - - - - - - - Disaster Recovery (DR) - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Disaster Recovery (DR) - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Disaster Recovery (DR)

    -

    We only store data in your Kubernetes cluster. You should consider backing this up regularly.

    -

    Exporting example:

    -
    kubectl get wf,cwf,cwft,wftmpl -A -o yaml > backup.yaml
    -
    -

    Importing example:

    -
    kubectl apply -f backup.yaml 
    -
    -

    You should also back-up any SQL persistence you use regularly with whatever tool is provided with it.

    - - - - -

    Comments

    - - +

    Disaster Recovery (DR) - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/doc-changes/index.html b/doc-changes/index.html index 69053f0484d4..51f8ea8b30a4 100644 --- a/doc-changes/index.html +++ b/doc-changes/index.html @@ -1,4003 +1,68 @@ - - - - - - - - - - - - - Documentation Changes - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Documentation Changes - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Documentation Changes

    -

    Docs help our customers understand how to use workflows and fix their own problems.

    -

    Doc changes are checked for spelling, broken links, and lint issues by CI. To check locally, run make docs.

    -

    General guidelines:

    -
      -
    • Explain when you would want to use a feature.
    • -
    • Provide working examples.
    • -
    • Format code using back-ticks to avoid it being reported as a spelling error.
    • -
    • Prefer 1 sentence per line of markdown
    • -
    • Follow the recommendations in the official Kubernetes Documentation Style Guide.
        -
      • Particularly useful sections include Content best practices and Patterns to avoid.
      • -
      • Note: Argo does not use the same tooling, so the sections on "shortcodes" and "EditorConfig" are not relevant.
      • -
      -
    • -
    -

    Running Locally

    -

    To test/run locally:

    -
    make docs-serve
    -
    -

    Tips

    -

    Use a service like Grammarly to check your grammar.

    -

    Having your computer read text out loud is a way to catch problems, e.g.:

    -
      -
    • Word substitutions (i.e. the wrong word is used, but spelled. -correctly).
    • -
    • Sentences that do not read correctly will sound wrong.
    • -
    -

    On Mac, to set-up:

    -
      -
    • Go to System Preferences / Accessibility / Spoken Content.
    • -
    • Choose a System Voice (I like Siri Voice 1).
    • -
    • Enable Speak selection.
    • -
    -

    To hear text, select the text you want to hear, then press option+escape.

    - - - - -

    Comments

    - - +

    Documentation Changes - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/empty-dir/index.html b/empty-dir/index.html index 6d2488a9b007..e27284048673 100644 --- a/empty-dir/index.html +++ b/empty-dir/index.html @@ -1,3943 +1,68 @@ - - - - - - - - - - - - - Empty Dir - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Empty Dir - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Empty Dir

    -

    While by default, the Docker and PNS workflow executors can get output artifacts/parameters from the base layer (e.g. /tmp), neither the Kubelet nor the K8SAPI executors can. It is unlikely you can get output artifacts/parameters from the base layer if you run your workflow pods with a security context.

    -

    You can work-around this constraint by mounting volumes onto your pod. The easiest way to do this is to use as emptyDir volume.

    -
    -

    Note

    -

    This is only needed for output artifacts/parameters. Input artifacts/parameters are automatically mounted to an empty-dir if needed

    -
    -

    This example shows how to mount an output volume:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: empty-dir-
    -spec:
    -  entrypoint: main
    -  templates:
    -    - name: main
    -      container:
    -        image: argoproj/argosay:v2
    -        command: [sh, -c]
    -        args: ["cowsay hello world | tee /mnt/out/hello_world.txt"]
    -        volumeMounts:
    -          - name: out
    -            mountPath: /mnt/out
    -      volumes:
    -        - name: out
    -          emptyDir: { }
    -      outputs:
    -        parameters:
    -          - name: message
    -            valueFrom:
    -              path: /mnt/out/hello_world.txt
    -
    - - - - -

    Comments

    - - +

    Empty Dir - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/enhanced-depends-logic/index.html b/enhanced-depends-logic/index.html index 06665ccc1021..165ac053f9e1 100644 --- a/enhanced-depends-logic/index.html +++ b/enhanced-depends-logic/index.html @@ -1,4070 +1,68 @@ - - - - - - - - - - - - - Enhanced Depends Logic - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Enhanced Depends Logic - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Enhanced Depends Logic

    -
    -

    v2.9 and after

    -
    -

    Introduction

    -

    Previous to version 2.8, the only way to specify dependencies in DAG templates was to use the dependencies field and -specify a list of other tasks the current task depends on. This syntax was limiting because it does not allow the user to -specify which result of the task to depend on. For example, a task may only be relevant to run if the dependent task -succeeded (or failed, etc.).

    -

    Depends

    -

    To remedy this, there exists a new field called depends, which allows users to specify dependent tasks, their statuses, -as well as any complex boolean logic. The field is a string field and the syntax is expression-like with operands having -form <task-name>.<task-result>. Examples include task-1.Succeeded, task-2.Failed, task-3.Daemoned. The full list of -available task results is as follows:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Task ResultDescriptionMeaning
    .SucceededTask SucceededTask finished with no error
    .FailedTask FailedTask exited with a non-0 exit code
    .ErroredTask ErroredTask had an error other than a non-0 exit code
    .SkippedTask SkippedTask was skipped
    .OmittedTask OmittedTask was omitted
    .DaemonedTask is Daemoned and is not Pending
    -

    For convenience, if an omitted task result is equivalent to (task.Succeeded || task.Skipped || task.Daemoned).

    -

    For example:

    -
    depends: "task || task-2.Failed"
    -
    -

    is equivalent to:

    -
    depends: (task.Succeeded || task.Skipped || task.Daemoned) || task-2.Failed
    -
    -

    Full boolean logic is also available. Operators include:

    -
      -
    • &&
    • -
    • ||
    • -
    • !
    • -
    -

    Example:

    -
    depends: "(task-2.Succeeded || task-2.Skipped) && !task-3.Failed"
    -
    -

    In the case that you're depending on a task that uses withItems, you can depend on -whether any of the item tasks are successful or all have failed using .AnySucceeded and .AllFailed, for example:

    -
    depends: "task-1.AnySucceeded || task-2.AllFailed"
    -
    -

    Compatibility with dependencies and dag.task.continueOn

    -

    This feature is fully compatible with dependencies and conversion is easy.

    -

    To convert simply join your dependencies with &&:

    -
    dependencies: ["A", "B", "C"]
    -
    -

    is equivalent to:

    -
    depends: "A && B && C"
    -
    -

    Because of the added control found in depends, the dag.task.continueOn is not available when using it. Furthermore, -it is not possible to use both dependencies and depends in the same task group.

    - - - - -

    Comments

    - - +

    Enhanced Depends Logic - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/environment-variables/index.html b/environment-variables/index.html index 586764f3acc1..13bbf7c2b8a3 100644 --- a/environment-variables/index.html +++ b/environment-variables/index.html @@ -1,4437 +1,68 @@ - - - - - - - - - - - - - Environment Variables - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Environment Variables - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Environment Variables

    -

    This document outlines environment variables that can be used to customize behavior.

    -
    -

    Warning

    -

    Environment variables are typically added to test out experimental features and should not be used by most users. -Environment variables may be removed at any time.

    -
    -

    Controller

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDefaultDescription
    ARGO_AGENT_TASK_WORKERSint16The number of task workers for the agent pod.
    ALL_POD_CHANGES_SIGNIFICANTboolfalseWhether to consider all pod changes as significant during pod reconciliation.
    ALWAYS_OFFLOAD_NODE_STATUSboolfalseWhether to always offload the node status.
    ARCHIVED_WORKFLOW_GC_PERIODtime.Duration24hThe periodicity for GC of archived workflows.
    ARGO_PPROFboolfalseEnable pprof endpoints
    ARGO_PROGRESS_PATCH_TICK_DURATIONtime.Duration1mHow often self reported progress is patched into the pod annotations which means how long it takes until the controller picks up the progress change. Set to 0 to disable self reporting progress.
    ARGO_PROGRESS_FILE_TICK_DURATIONtime.Duration3sHow often the progress file is read by the executor. Set to 0 to disable self reporting progress.
    ARGO_REMOVE_PVC_PROTECTION_FINALIZERbooltrueRemove the kubernetes.io/pvc-protection finalizer from persistent volume claims (PVC) after marking PVCs created for the workflow for deletion, so deleted is not blocked until the pods are deleted. #6629
    ARGO_TRACEstring``Whether to enable tracing statements in Argo components.
    ARGO_AGENT_PATCH_RATEtime.DurationDEFAULT_REQUEUE_TIMERate that the Argo Agent will patch the workflow task-set.
    ARGO_AGENT_CPU_LIMITresource.Quantity100mCPU resource limit for the agent.
    ARGO_AGENT_MEMORY_LIMITresource.Quantity256mMemory resource limit for the agent.
    BUBBLE_ENTRY_TEMPLATE_ERRbooltrueWhether to bubble up template errors to workflow.
    CACHE_GC_PERIODtime.Duration0sHow often to perform memoization cache GC, which is disabled by default and can be enabled by providing a non-zero duration.
    CACHE_GC_AFTER_NOT_HIT_DURATIONtime.Duration30sWhen a memoization cache has not been hit after this duration, it will be deleted.
    CRON_SYNC_PERIODtime.Duration10sHow often to sync cron workflows.
    DEFAULT_REQUEUE_TIMEtime.Duration10sThe re-queue time for the rate limiter of the workflow queue.
    DISABLE_MAX_RECURSIONboolfalseSet to true to disable the recursion preventer, which will stop a workflow running which has called into a child template 100 times
    EXPRESSION_TEMPLATESbooltrueEscape hatch to disable expression templates.
    EVENT_AGGREGATION_WITH_ANNOTATIONSboolfalseWhether event annotations will be used when aggregating events.
    GZIP_IMPLEMENTATIONstringPGZipThe implementation of compression/decompression. Currently only "PGZip" and "GZip" are supported.
    INFORMER_WRITE_BACKbooltrueWhether to write back to informer instead of catching up.
    HEALTHZ_AGEtime.Duration5mHow old a un-reconciled workflow is to report unhealthy.
    INDEX_WORKFLOW_SEMAPHORE_KEYSbooltrueWhether or not to index semaphores.
    LEADER_ELECTION_IDENTITYstringController's metadata.nameThe ID used for workflow controllers to elect a leader.
    LEADER_ELECTION_DISABLEboolfalseWhether leader election should be disabled.
    LEADER_ELECTION_LEASE_DURATIONtime.Duration15sThe duration that non-leader candidates will wait to force acquire leadership.
    LEADER_ELECTION_RENEW_DEADLINEtime.Duration10sThe duration that the acting master will retry refreshing leadership before giving up.
    LEADER_ELECTION_RETRY_PERIODtime.Duration5sThe duration that the leader election clients should wait between tries of actions.
    MAX_OPERATION_TIMEtime.Duration30sThe maximum time a workflow operation is allowed to run for before re-queuing the workflow onto the work queue.
    OFFLOAD_NODE_STATUS_TTLtime.Duration5mThe TTL to delete the offloaded node status. Currently only used for testing.
    OPERATION_DURATION_METRIC_BUCKET_COUNTint6The number of buckets to collect the metric for the operation duration.
    POD_NAMESstringv2Whether to have pod names contain the template name (v2) or be the node id (v1) - should be set the same for Argo Server.
    RECENTLY_STARTED_POD_DURATIONtime.Duration10sThe duration of a pod before the pod is considered to be recently started.
    RETRY_BACKOFF_DURATIONtime.Duration10msThe retry back-off duration when retrying API calls.
    RETRY_BACKOFF_FACTORfloat2.0The retry back-off factor when retrying API calls.
    RETRY_BACKOFF_STEPSint5The retry back-off steps when retrying API calls.
    RETRY_HOST_NAME_LABEL_KEYstringkubernetes.io/hostnameThe label key for host name used when retrying templates.
    TRANSIENT_ERROR_PATTERNstring""The regular expression that represents additional patterns for transient errors.
    WF_DEL_PROPAGATION_POLICYstring""The deletion propagation policy for workflows.
    WORKFLOW_GC_PERIODtime.Duration5mThe periodicity for GC of workflows.
    SEMAPHORE_NOTIFY_DELAYtime.Duration1sTuning Delay when notifying semaphore waiters about availability in the semaphore
    -

    CLI parameters of the Controller can be specified as environment variables with the ARGO_ prefix. -For example:

    -
    workflow-controller --managed-namespace=argo
    -
    -

    Can be expressed as:

    -
    ARGO_MANAGED_NAMESPACE=argo workflow-controller
    -
    -

    You can set environment variables for the Controller Deployment's container spec like the following:

    -
    apiVersion: apps/v1
    -kind: Deployment
    -metadata:
    -  name: workflow-controller
    -spec:
    -  selector:
    -    matchLabels:
    -      app: workflow-controller
    -  template:
    -    metadata:
    -      labels:
    -        app: workflow-controller
    -    spec:
    -      containers:
    -        - env:
    -            - name: WORKFLOW_GC_PERIOD
    -              value: 30s
    -
    -

    Executor

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDefaultDescription
    EXECUTOR_RETRY_BACKOFF_DURATIONtime.Duration1sThe retry back-off duration when the workflow executor performs retries.
    EXECUTOR_RETRY_BACKOFF_FACTORfloat1.6The retry back-off factor when the workflow executor performs retries.
    EXECUTOR_RETRY_BACKOFF_JITTERfloat0.5The retry back-off jitter when the workflow executor performs retries.
    EXECUTOR_RETRY_BACKOFF_STEPSint5The retry back-off steps when the workflow executor performs retries.
    REMOVE_LOCAL_ART_PATHboolfalseWhether to remove local artifacts.
    RESOURCE_STATE_CHECK_INTERVALtime.Duration5sThe time interval between resource status checks against the specified success and failure conditions.
    WAIT_CONTAINER_STATUS_CHECK_INTERVALtime.Duration5sThe time interval for wait container to check whether the containers have completed.
    -

    You can set environment variables for the Executor in your workflow-controller-configmap like the following:

    -
    apiVersion: v1
    -kind: ConfigMap
    -metadata:
    -  name: workflow-controller-configmap
    -data:
    -  config: |
    -    executor:
    -      env:
    -      - name: RESOURCE_STATE_CHECK_INTERVAL
    -        value: 3s
    -
    -

    Argo Server

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDefaultDescription
    DISABLE_VALUE_LIST_RETRIEVAL_KEY_PATTERNstring""Disable the retrieval of the list of label values for keys based on this regular expression.
    FIRST_TIME_USER_MODALbooltrueShow this modal.
    FEEDBACK_MODALbooltrueShow this modal.
    IP_KEY_FUNC_HEADERSstring""List of comma separated request headers containing IPs to use for rate limiting. For example, "X-Forwarded-For,X-Real-IP". By default, uses the request's remote IP address.
    NEW_VERSION_MODALbooltrueShow this modal.
    POD_NAMESstringv2Whether to have pod names contain the template name (v2) or be the node id (v1) - should be set the same for Controller
    GRPC_MESSAGE_SIZEstring104857600Use different GRPC Max message size for Server (supporting huge workflows).
    -

    CLI parameters of the Server can be specified as environment variables with the ARGO_ prefix. -For example:

    -
    argo server --managed-namespace=argo
    -
    -

    Can be expressed as:

    -
    ARGO_MANAGED_NAMESPACE=argo argo server
    -
    -

    You can set environment variables for the Server Deployment's container spec like the following:

    -
    apiVersion: apps/v1
    -kind: Deployment
    -metadata:
    -  name: argo-server
    -spec:
    -  selector:
    -    matchLabels:
    -      app: argo-server
    -  template:
    -    metadata:
    -      labels:
    -        app: argo-server
    -    spec:
    -      containers:
    -        - args:
    -            - server
    -          image: argoproj/argocli:latest
    -          name: argo-server
    -          env:
    -            - name: GRPC_MESSAGE_SIZE
    -              value: "209715200"
    -          ports:
    -          # ...
    -
    - - - - -

    Comments

    - - +

    Environment Variables - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/estimated-duration/index.html b/estimated-duration/index.html index 0ab8a60b82e8..915436566284 100644 --- a/estimated-duration/index.html +++ b/estimated-duration/index.html @@ -1,3926 +1,68 @@ - - - - - - - - - - - - - Estimated Duration - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Estimated Duration - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Estimated Duration

    -
    -

    v2.12 and after

    -
    -

    When you run a workflow, the controller will try to estimate its duration.

    -

    This is based on the most recently successful workflow submitted from the same workflow template, cluster workflow template or cron workflow.

    -

    To get this data, the controller queries the Kubernetes API first (as this is faster) and then workflow archive (if enabled).

    -

    If you've used tools like Jenkins, you'll know that that estimates can be inaccurate:

    -
      -
    • A pod spent a long amount of time pending scheduling.
    • -
    • The workflow is non-deterministic, e.g. it uses when to execute different paths.
    • -
    • The workflow can vary is scale, e.g. sometimes it uses withItems and so sometimes run 100 nodes, sometimes a 1000.
    • -
    • If the pod runtimes are unpredictable.
    • -
    • The workflow is parametrized, and different parameters affect its duration.
    • -
    - - - - -

    Comments

    - - +

    Estimated Duration - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/events/index.html b/events/index.html index 2702a326906a..a273d842ed80 100644 --- a/events/index.html +++ b/events/index.html @@ -1,4325 +1,68 @@ - - - - - - - - - - - - - Events - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Events - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - - - - +
    +
    +
    +
    - - - - - - - - -

    Events

    -
    -

    v2.11 and after

    -
    -

    Overview

    -

    To support external webhooks, we have this endpoint /api/v1/events/{namespace}/{discriminator}. Events sent to that can be any JSON data.

    -

    These events can submit workflow templates or cluster workflow templates.

    -

    You may also wish to read about webhooks.

    -

    Authentication and Security

    -

    Clients wanting to send events to the endpoint need an access token.

    -

    It is only possible to submit workflow templates your access token has access to: example role.

    -

    Example (note the trailing slash):

    -
    curl https://localhost:2746/api/v1/events/argo/ \
    -  -H "Authorization: $ARGO_TOKEN" \
    -  -d '{"message": "hello"}'
    -
    -

    With a discriminator:

    -
    curl https://localhost:2746/api/v1/events/argo/my-discriminator \
    -  -H "Authorization: $ARGO_TOKEN" \
    -  -d '{"message": "hello"}'
    -
    -

    The event endpoint will always return in under 10 seconds because the event will be queued and processed asynchronously. This means you will not be notified synchronously of failure. It will return a failure (503) if the event processing queue is full.

    -
    -

    Processing Order

    -

    Events may not always be processed in the order they are received.

    -
    -

    Workflow Template triggered by the event

    -

    Before the binding between an event and a workflow template, you must create the workflow template that you want to trigger. -The following one takes in input the "message" parameter specified into the API call body, passed through the WorkflowEventBinding parameters section, and finally resolved here as the message of the whalesay image.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: WorkflowTemplate
    -metadata:
    -  name: my-wf-tmple
    -  namespace: argo
    -spec:
    -  templates:
    -    - name: main
    -      inputs:
    -        parameters:
    -          - name: message
    -            value: "{{workflow.parameters.message}}"
    -      container:
    -        image: docker/whalesay:latest
    -        command: [cowsay]
    -        args: ["{{inputs.parameters.message}}"]
    -  entrypoint: main
    -
    -

    Submitting A Workflow From A Workflow Template

    -

    A workflow template will be submitted (i.e. workflow created from it) and that can be created using parameters from the event itself. -The following example will be triggered by an event with "message" in the payload. That message will be used as an argument for the created workflow. Note that the name of the meta-data header "x-argo-e2e" is lowercase in the selector to match. Incoming header names are converted to lowercase.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: WorkflowEventBinding
    -metadata:
    -  name: event-consumer
    -spec:
    -  event:
    -    # metadata header name must be lowercase to match in selector
    -    selector: payload.message != "" && metadata["x-argo-e2e"] == ["true"] && discriminator == "my-discriminator"
    -  submit:
    -    workflowTemplateRef:
    -      name: my-wf-tmple
    -    arguments:
    -      parameters:
    -      - name: message
    -        valueFrom:
    -          event: payload.message
    -
    -

    Please, notice that workflowTemplateRef refers to a template with the name my-wf-tmple, this template has to be created before the triggering of the event. -After that you have to apply the above explained WorkflowEventBinding (in this example this is called event-template.yml) to realize the binding between Workflow Template and event (you can use kubectl to do that):

    -
    kubectl apply -f event-template.yml
    -
    -

    Finally you can trigger the creation of your first parametrized workflow template, by using the following call:

    -

    Event:

    -
    curl $ARGO_SERVER/api/v1/events/argo/my-discriminator \
    -    -H "Authorization: $ARGO_TOKEN" \
    -    -H "X-Argo-E2E: true" \
    -    -d '{"message": "hello events"}'
    -
    -
    -

    Malformed Expressions

    -

    If the expression is malformed, this is logged. It is not visible in logs or the UI.

    -
    -

    Customizing the Workflow Meta-Data

    -

    You can customize the name of the submitted workflow as well as add annotations and -labels. This is done by adding a metadata object to the submit object.

    -

    Normally the name of the workflow created from an event is simply the name of the -template with a time-stamp appended. This can be customized by setting the name in the -metadata object.

    -

    Annotations and labels are added in the same fashion.

    -

    All the values for the name, annotations and labels are treated as expressions (see -below for details). The metadata object is the same metadata type as on all -Kubernetes resources and as such is parsed in the same manner. It is best to enclose -the expression in single quotes to avoid any problems when submitting the event -binding to Kubernetes.

    -

    This is an example snippet of how to set the name, annotations and labels. This is -based on the workflow binding from above, and the first event.

    -
    submit:
    -  metadata:
    -    annotations:
    -      anAnnotation: 'event.payload.message'
    -    name: 'event.payload.message + "-world"'
    -    labels:
    -      someLabel: '"literal string"'
    -
    -

    This will result in the workflow being named "hello-world" instead of -my-wf-tmple-<timestamp>. There will be an extra label with the key someLabel and -a value of "literal string". There will also be an extra annotation with the key -anAnnotation and a value of "hello"

    -

    Be careful when setting the name. If the name expression evaluates to that of a currently -existing workflow, the new workflow will fail to submit.

    -

    The name, annotation and label expression must evaluate to a string and follow the normal Kubernetes naming -requirements.

    -

    Event Expression Syntax and the Event Expression Environment

    -

    Event expressions are expressions that are evaluated over the event expression environment.

    -

    Expression Syntax

    -

    Because the endpoint accepts any JSON data, it is the user's responsibility to write a suitable expression to correctly filter the events they are interested in. Therefore, DO NOT assume the existence of any fields, and guard against them using a nil check.

    -

    Learn more about expression syntax.

    -

    Expression Environment

    -

    The event environment contains:

    -
      -
    • payload the event payload.
    • -
    • metadata event meta-data, including HTTP headers.
    • -
    • discriminator the discriminator from the URL.
    • -
    -

    Payload

    -

    This is the JSON payload of the event.

    -

    Example:

    -
    payload.repository.clone_url == "http://gihub.com/argoproj/argo"
    -
    -

    Meta-Data

    -

    Meta-data is data about the event, this includes headers:

    -

    Headers

    -

    HTTP header names are lowercase and only include those that have x- as their prefix. Their values are lists, not single values.

    -
      -
    • Wrong: metadata["X-Github-Event"] == "push"
    • -
    • Wrong: metadata["x-github-event"] == "push"
    • -
    • Wrong: metadata["X-Github-Event"] == ["push"]
    • -
    • Wrong: metadata["github-event"] == ["push"]
    • -
    • Wrong: metadata["authorization"] == ["push"]
    • -
    • Right: metadata["x-github-event"] == ["push"]
    • -
    -

    Example:

    -
    metadata["x-argo"] == ["yes"]
    -
    -

    Discriminator

    -

    This is only for edge-cases where neither the payload, or meta-data provide enough information to discriminate. Typically, it should be empty and ignored.

    -

    Example:

    -
    discriminator == "my-discriminator"
    -
    -

    High-Availability

    -
    -

    Run Minimum 2 Replicas

    -

    You MUST run a minimum of two Argo Server replicas if you do not want to lose events.

    -
    -

    If you are processing large numbers of events, you may need to scale up the Argo Server to handle them. By default, a single Argo Server can be processing 64 events before the endpoint will start returning 503 errors.

    -

    Vertically you can:

    -
      -
    • Increase the size of the event operation queue --event-operation-queue-size (good for temporary event bursts).
    • -
    • Increase the number of workers --event-worker-count (good for sustained numbers of events).
    • -
    -

    Horizontally you can:

    -
      -
    • Run more Argo Servers (good for sustained numbers of events AND high-availability).
    • -
    - - - - -

    Comments

    - - +

    Events - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/executor_plugins/index.html b/executor_plugins/index.html index 2015f94fc03e..d6ce10b88666 100644 --- a/executor_plugins/index.html +++ b/executor_plugins/index.html @@ -1,4368 +1,68 @@ - - - - - - - - - - - - - Executor Plugins - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Executor Plugins - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    - -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Executor Plugins

    -
    -

    Since v3.3

    -
    -

    Configuration

    -

    Plugins are disabled by default. To enable them, start the controller with ARGO_EXECUTOR_PLUGINS=true, e.g.

    -
    apiVersion: apps/v1
    -kind: Deployment
    -metadata:
    -  name: workflow-controller
    -spec:
    -  template:
    -    spec:
    -      containers:
    -        - name: workflow-controller
    -          env:
    -            - name: ARGO_EXECUTOR_PLUGINS
    -              value: "true"
    -
    -

    When using the Helm chart, add this to your values.yaml:

    -
    controller:
    -  extraEnv:
    -    - name: ARGO_EXECUTOR_PLUGINS
    -      value: "true"
    -
    -

    Template Executor

    -

    This is a plugin that runs custom "plugin" templates, e.g. for non-pod tasks such as Tekton builds, Spark jobs, sending -Slack notifications.

    -

    A Simple Python Plugin

    -

    Let's make a Python plugin that prints "hello" each time the workflow is operated on.

    -

    We need the following:

    -
      -
    1. Plugins enabled (see above).
    2. -
    3. A HTTP server that will be run as a sidecar to the main container and will respond to RPC HTTP requests from the - executor with this API contract.
    4. -
    5. A plugin.yaml configuration file, that is turned into a config map so the controller can discover the plugin.
    6. -
    -

    A template executor plugin services HTTP POST requests on /api/v1/template.execute:

    -
    curl http://localhost:4355/api/v1/template.execute -d \
    -'{
    -  "workflow": {
    -    "metadata": {
    -      "name": "my-wf"
    -    }
    -  },
    -  "template": {
    -    "name": "my-tmpl",
    -    "inputs": {},
    -    "outputs": {},
    -    "plugin": {
    -      "hello": {}
    -    }
    -  }
    -}'
    -# ...
    -HTTP/1.1 200 OK
    -{
    -  "node": {
    -    "phase": "Succeeded",
    -    "message": "Hello template!"
    -  }
    -}
    -
    -

    Tip: The port number can be anything, but must not conflict with other plugins. Don't use common ports such as 80, -443, 8080, 8081, 8443. If you plan to publish your plugin, choose a random port number under 10,000 and create a PR to -add your plugin. If not, use a port number greater than 10,000.

    -

    We'll need to create a script that starts a HTTP server. Save this as server.py:

    -
    import json
    -from http.server import BaseHTTPRequestHandler, HTTPServer
    -
    -with open("/var/run/argo/token") as f:
    -    token = f.read().strip()
    -
    -
    -class Plugin(BaseHTTPRequestHandler):
    -
    -    def args(self):
    -        return json.loads(self.rfile.read(int(self.headers.get('Content-Length'))))
    -
    -    def reply(self, reply):
    -        self.send_response(200)
    -        self.end_headers()
    -        self.wfile.write(json.dumps(reply).encode("UTF-8"))
    -
    -    def forbidden(self):
    -        self.send_response(403)
    -        self.end_headers()
    -
    -    def unsupported(self):
    -        self.send_response(404)
    -        self.end_headers()
    -
    -    def do_POST(self):
    -        if self.headers.get("Authorization") != "Bearer " + token:
    -            self.forbidden()
    -        elif self.path == '/api/v1/template.execute':
    -            args = self.args()
    -            if 'hello' in args['template'].get('plugin', {}):
    -                self.reply(
    -                    {'node': {'phase': 'Succeeded', 'message': 'Hello template!',
    -                              'outputs': {'parameters': [{'name': 'foo', 'value': 'bar'}]}}})
    -            else:
    -                self.reply({})
    -        else:
    -            self.unsupported()
    -
    -
    -if __name__ == '__main__':
    -    httpd = HTTPServer(('', 4355), Plugin)
    -    httpd.serve_forever()
    -
    -

    Tip: Plugins can be written in any language you can run as a container. Python is convenient because you can embed -the script in the container.

    -

    Some things to note here:

    -
      -
    • You only need to implement the calls you need. Return 404 and it won't be called again.
    • -
    • The path is the RPC method name.
    • -
    • You should check that the Authorization header contains the same value as /var/run/argo/token. Return 403 if not
    • -
    • The request body contains the template's input parameters.
    • -
    • The response body may contain the node's result, including the phase (e.g. "Succeeded" or "Failed") and a message.
    • -
    • If the response is {}, then the plugin is saying it cannot execute the plugin template, e.g. it is a Slack plugin, - but the template is a Tekton job.
    • -
    • If the status code is 404, then the plugin will not be called again.
    • -
    • If you save the file as server.*, it will be copied to the sidecar container's args field. This is useful for building self-contained plugins in scripting languages like Python or Node.JS.
    • -
    -

    Next, create a manifest named plugin.yaml:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: ExecutorPlugin
    -metadata:
    -  name: hello
    -spec:
    -  sidecar:
    -    container:
    -      command:
    -        - python
    -        - -u # disables output buffering
    -        - -c
    -      image: python:alpine3.6
    -      name: hello-executor-plugin
    -      ports:
    -        - containerPort: 4355
    -      securityContext:
    -        runAsNonRoot: true
    -        runAsUser: 65534 # nobody
    -      resources:
    -        requests:
    -          memory: "64Mi"
    -          cpu: "250m"
    -        limits:
    -          memory: "128Mi"
    -          cpu: "500m"
    -
    -

    Build and install as follows:

    -
    argo executor-plugin build .
    -kubectl -n argo apply -f hello-executor-plugin-configmap.yaml
    -
    -

    Check your controller logs:

    -
    level=info msg="Executor plugin added" name=hello-controller-plugin
    -
    -

    Run this workflow.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: hello-
    -spec:
    -  entrypoint: main
    -  templates:
    -    - name: main
    -      plugin:
    -        hello: { }
    -
    -

    You'll see the workflow complete successfully.

    -

    Discovery

    -

    When a workflow is run, plugins are loaded from:

    -
      -
    • The workflow's namespace.
    • -
    • The Argo installation namespace (typically argo).
    • -
    -

    If two plugins have the same name, only the one in the workflow's namespace is loaded.

    -

    Secrets

    -

    If you interact with a third-party system, you'll need access to secrets. Don't put them in plugin.yaml. Use a secret:

    -
    spec:
    -  sidecar:
    -    container:
    -      env:
    -        - name: URL
    -          valueFrom:
    -            secretKeyRef:
    -              name: slack-executor-plugin
    -              key: URL
    -
    -

    Refer to the Kubernetes Secret documentation for secret best practices and security considerations.

    -

    Resources, Security Context

    -

    We made these mandatory, so no one can create a plugin that uses an unreasonable amount of memory, or run as root unless -they deliberately do so:

    -
    spec:
    -  sidecar:
    -    container:
    -      resources:
    -        requests:
    -          cpu: 100m
    -          memory: 32Mi
    -        limits:
    -          cpu: 200m
    -          memory: 64Mi
    -      securityContext:
    -        runAsNonRoot: true
    -        runAsUser: 1000
    -
    -

    Failure

    -

    A plugin may fail as follows:

    -
      -
    • Connection/socket error - considered transient.
    • -
    • Timeout - considered transient.
    • -
    • 404 error - method is not supported by the plugin, as a result the method will not be called again (in the same workflow).
    • -
    • 503 error - considered transient.
    • -
    • Other 4xx/5xx errors - considered fatal.
    • -
    -

    Transient errors are retried, all other errors are considered fatal.

    -

    Fatal errors will result in failed steps.

    -

    Re-Queue

    -

    It might be the case that the plugin can't finish straight away. E.g. it starts a long running task. When that happens, -you return "Pending" or "Running" a and a re-queue time:

    -
    {
    -  "node": {
    -    "phase": "Running",
    -    "message": "Long-running task started"
    -  },
    -  "requeue": "2m"
    -}
    -
    -

    In this example, the task will be re-queued and template.execute will be called again in 2 minutes.

    -

    Debugging

    -

    You can find the plugin's log in the agent pod's sidecar, e.g.:

    -
    kubectl -n argo logs ${agentPodName} -c hello-executor-plugin
    -
    -

    Listing Plugins

    -

    Because plugins are just config maps, you can list them using kubectl:

    -
    kubectl get cm -l workflows.argoproj.io/configmap-type=ExecutorPlugin
    -
    -

    Examples and Community Contributed Plugins

    -

    Plugin directory

    -

    Publishing Your Plugin

    -

    If you want to publish and share you plugin (we hope you do!), then submit a pull request to add it to the above -directory.

    - - - - -

    Comments

    - - +

    Executor Plugins - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/executor_swagger/index.html b/executor_swagger/index.html index 04e72ce42ae5..80f72afa03a6 100644 --- a/executor_swagger/index.html +++ b/executor_swagger/index.html @@ -1,25967 +1,68 @@ - - - - - - - - - - - - - The API for an executor plugin. - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + The API for an executor plugin. - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    The API for an executor plugin.

    -

    Informations

    -

    Version

    -

    0.0.1

    -

    Content negotiation

    -

    URI Schemes

    -
      -
    • http
    • -
    -

    Consumes

    -
      -
    • application/json
    • -
    -

    Produces

    -
      -
    • application/json
    • -
    -

    All endpoints

    -

    operations

    - - - - - - - - - - - - - - - - - -
    MethodURINameSummary
    POST/api/v1/template.executeexecute template
    -

    Paths

    -

    execute template (executeTemplate)

    -
    POST /api/v1/template.execute
    -
    -

    Parameters

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameSourceTypeGo typeSeparatorRequiredDefaultDescription
    BodybodyExecuteTemplateArgsmodels.ExecuteTemplateArgs
    -

    All responses

    - - - - - - - - - - - - - - - - - - - -
    CodeStatusDescriptionHas headersSchema
    200OKschema
    -

    Responses

    -
    200
    -

    Status: OK

    -
    Schema
    -

    ExecuteTemplateReply

    -

    Models

    -

    AWSElasticBlockStoreVolumeSource

    -
    -

    An AWS EBS disk must exist before mounting to a container. The disk -must also be in the same AWS zone as the kubelet. An AWS EBS disk -can only be mounted as read/write once. AWS EBS volumes support -ownership management and SELinux relabeling.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    fsTypestringstringfsType is the filesystem type of the volume that you want to mount.
    Tip: Ensure that the filesystem type is supported by the host operating system.
    Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
    More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
    TODO: how do we prevent errors in the filesystem from compromising the machine
    +optional
    partitionint32 (formatted integer)int32partition is the partition in the volume that you want to mount.
    If omitted, the default is to mount by volume name.
    Examples: For volume /dev/sda1, you specify the partition as "1".
    Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
    +optional
    readOnlybooleanboolreadOnly value true will force the readOnly setting in VolumeMounts.
    More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
    +optional
    volumeIDstringstringvolumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).
    More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
    -

    Affinity

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    nodeAffinityNodeAffinityNodeAffinity
    podAffinityPodAffinityPodAffinity
    podAntiAffinityPodAntiAffinityPodAntiAffinity
    -

    Amount

    -
    -

    +kubebuilder:validation:Type=number

    -
    -

    interface{}

    -

    AnyString

    -
    -

    It will unmarshall int64, int32, float64, float32, boolean, a plain string and represents it as string. -It will marshall back to string - marshalling is not symmetric.

    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    AnyStringstringstringIt will unmarshall int64, int32, float64, float32, boolean, a plain string and represents it as string.
    It will marshall back to string - marshalling is not symmetric.
    -

    ArchiveStrategy

    -
    -

    ArchiveStrategy describes how to archive files/directory when saving artifacts

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    noneNoneStrategyNoneStrategy
    tarTarStrategyTarStrategy
    zipZipStrategyZipStrategy
    -

    Arguments

    -
    -

    Arguments to a template

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    artifactsArtifactsArtifacts
    parameters[]Parameter[]*ParameterParameters is the list of parameters to pass to the template or workflow
    +patchStrategy=merge
    +patchMergeKey=name
    -

    Artifact

    -
    -

    Artifact indicates an artifact to place at a specified path

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    archiveArchiveStrategyArchiveStrategy
    archiveLogsbooleanboolArchiveLogs indicates if the container logs should be archived
    artifactGCArtifactGCArtifactGC
    artifactoryArtifactoryArtifactArtifactoryArtifact
    azureAzureArtifactAzureArtifact
    deletedbooleanboolHas this been deleted?
    fromstringstringFrom allows an artifact to reference an artifact from a previous step
    fromExpressionstringstringFromExpression, if defined, is evaluated to specify the value for the artifact
    gcsGCSArtifactGCSArtifact
    gitGitArtifactGitArtifact
    globalNamestringstringGlobalName exports an output artifact to the global scope, making it available as
    '{{workflow.outputs.artifacts.XXXX}} and in workflow.status.outputs.artifacts
    hdfsHDFSArtifactHDFSArtifact
    httpHTTPArtifactHTTPArtifact
    modeint32 (formatted integer)int32mode bits to use on this file, must be a value between 0 and 0777
    set when loading input artifacts.
    namestringstringname of the artifact. must be unique within a template's inputs/outputs.
    optionalbooleanboolMake Artifacts optional, if Artifacts doesn't generate or exist
    ossOSSArtifactOSSArtifact
    pathstringstringPath is the container path to the artifact
    rawRawArtifactRawArtifact
    recurseModebooleanboolIf mode is set, apply the permission recursively into the artifact if it is a folder
    s3S3ArtifactS3Artifact
    subPathstringstringSubPath allows an artifact to be sourced from a subpath within the specified source
    -

    ArtifactGC

    -
    -

    ArtifactGC describes how to delete artifacts from completed Workflows - this is embedded into the WorkflowLevelArtifactGC, and also used for individual Artifacts to override that as needed

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    podMetadataMetadataMetadata
    serviceAccountNamestringstringServiceAccountName is an optional field for specifying the Service Account that should be assigned to the Pod doing the deletion
    strategyArtifactGCStrategyArtifactGCStrategy
    -

    ArtifactGCStrategy

    - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    ArtifactGCStrategystringstring
    -

    ArtifactLocation

    -
    -

    It is used as single artifact in the context of inputs/outputs (e.g. outputs.artifacts.artname). -It is also used to describe the location of multiple artifacts such as the archive location -of a single workflow step, which the executor will use as a default location to store its files.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    archiveLogsbooleanboolArchiveLogs indicates if the container logs should be archived
    artifactoryArtifactoryArtifactArtifactoryArtifact
    azureAzureArtifactAzureArtifact
    gcsGCSArtifactGCSArtifact
    gitGitArtifactGitArtifact
    hdfsHDFSArtifactHDFSArtifact
    httpHTTPArtifactHTTPArtifact
    ossOSSArtifactOSSArtifact
    rawRawArtifactRawArtifact
    s3S3ArtifactS3Artifact
    -

    ArtifactPaths

    -
    -

    ArtifactPaths expands a step from a collection of artifacts

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    archiveArchiveStrategyArchiveStrategy
    archiveLogsbooleanboolArchiveLogs indicates if the container logs should be archived
    artifactGCArtifactGCArtifactGC
    artifactoryArtifactoryArtifactArtifactoryArtifact
    azureAzureArtifactAzureArtifact
    deletedbooleanboolHas this been deleted?
    fromstringstringFrom allows an artifact to reference an artifact from a previous step
    fromExpressionstringstringFromExpression, if defined, is evaluated to specify the value for the artifact
    gcsGCSArtifactGCSArtifact
    gitGitArtifactGitArtifact
    globalNamestringstringGlobalName exports an output artifact to the global scope, making it available as
    '{{workflow.outputs.artifacts.XXXX}} and in workflow.status.outputs.artifacts
    hdfsHDFSArtifactHDFSArtifact
    httpHTTPArtifactHTTPArtifact
    modeint32 (formatted integer)int32mode bits to use on this file, must be a value between 0 and 0777
    set when loading input artifacts.
    namestringstringname of the artifact. must be unique within a template's inputs/outputs.
    optionalbooleanboolMake Artifacts optional, if Artifacts doesn't generate or exist
    ossOSSArtifactOSSArtifact
    pathstringstringPath is the container path to the artifact
    rawRawArtifactRawArtifact
    recurseModebooleanboolIf mode is set, apply the permission recursively into the artifact if it is a folder
    s3S3ArtifactS3Artifact
    subPathstringstringSubPath allows an artifact to be sourced from a subpath within the specified source
    -

    ArtifactoryArtifact

    -
    -

    ArtifactoryArtifact is the location of an artifactory artifact

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    passwordSecretSecretKeySelectorSecretKeySelector
    urlstringstringURL of the artifact
    usernameSecretSecretKeySelectorSecretKeySelector
    -

    Artifacts

    -

    []Artifact

    -

    AzureArtifact

    -
    -

    AzureArtifact is the location of a an Azure Storage artifact

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    accountKeySecretSecretKeySelectorSecretKeySelector
    blobstringstringBlob is the blob name (i.e., path) in the container where the artifact resides
    containerstringstringContainer is the container where resources will be stored
    endpointstringstringEndpoint is the service url associated with an account. It is most likely "https://.blob.core.windows.net"
    useSDKCredsbooleanboolUseSDKCreds tells the driver to figure out credentials based on sdk defaults.
    -

    AzureDataDiskCachingMode

    -
    -

    +enum

    -
    - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    AzureDataDiskCachingModestringstring+enum
    -

    AzureDataDiskKind

    -
    -

    +enum

    -
    - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    AzureDataDiskKindstringstring+enum
    -

    AzureDiskVolumeSource

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    cachingModeAzureDataDiskCachingModeAzureDataDiskCachingMode
    diskNamestringstringdiskName is the Name of the data disk in the blob storage
    diskURIstringstringdiskURI is the URI of data disk in the blob storage
    fsTypestringstringfsType is Filesystem type to mount.
    Must be a filesystem type supported by the host operating system.
    Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
    +optional
    kindAzureDataDiskKindAzureDataDiskKind
    readOnlybooleanboolreadOnly Defaults to false (read/write). ReadOnly here will force
    the ReadOnly setting in VolumeMounts.
    +optional
    -

    AzureFileVolumeSource

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    readOnlybooleanboolreadOnly defaults to false (read/write). ReadOnly here will force
    the ReadOnly setting in VolumeMounts.
    +optional
    secretNamestringstringsecretName is the name of secret that contains Azure Storage Account Name and Key
    shareNamestringstringshareName is the azure share Name
    -

    Backoff

    -
    -

    Backoff is a backoff strategy to use within retryStrategy

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    durationstringstringDuration is the amount to back off. Default unit is seconds, but could also be a duration (e.g. "2m", "1h")
    factorIntOrStringIntOrString
    maxDurationstringstringMaxDuration is the maximum amount of time allowed for a workflow in the backoff strategy
    -

    BasicAuth

    -
    -

    BasicAuth describes the secret selectors required for basic authentication

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    passwordSecretSecretKeySelectorSecretKeySelector
    usernameSecretSecretKeySelectorSecretKeySelector
    -

    CSIVolumeSource

    -
    -

    Represents a source location of a volume to mount, managed by an external CSI driver

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    driverstringstringdriver is the name of the CSI driver that handles this volume.
    Consult with your admin for the correct name as registered in the cluster.
    fsTypestringstringfsType to mount. Ex. "ext4", "xfs", "ntfs".
    If not provided, the empty value is passed to the associated CSI driver
    which will determine the default filesystem to apply.
    +optional
    nodePublishSecretRefLocalObjectReferenceLocalObjectReference
    readOnlybooleanboolreadOnly specifies a read-only configuration for the volume.
    Defaults to false (read/write).
    +optional
    volumeAttributesmap of stringmap[string]stringvolumeAttributes stores driver-specific properties that are passed to the CSI
    driver. Consult your driver's documentation for supported values.
    +optional
    -

    Cache

    -
    -

    Cache is the configuration for the type of cache to be used

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    configMapConfigMapKeySelectorConfigMapKeySelector
    -

    Capabilities

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    add[]Capability[]CapabilityAdded capabilities
    +optional
    drop[]Capability[]CapabilityRemoved capabilities
    +optional
    -

    Capability

    -
    -

    Capability represent POSIX capabilities type

    -
    - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    CapabilitystringstringCapability represent POSIX capabilities type
    -

    CephFSVolumeSource

    -
    -

    Represents a Ceph Filesystem mount that lasts the lifetime of a pod -Cephfs volumes do not support ownership management or SELinux relabeling.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    monitors[]string[]stringmonitors is Required: Monitors is a collection of Ceph monitors
    More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
    pathstringstringpath is Optional: Used as the mounted root, rather than the full Ceph tree, default is /
    +optional
    readOnlybooleanboolreadOnly is Optional: Defaults to false (read/write). ReadOnly here will force
    the ReadOnly setting in VolumeMounts.
    More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
    +optional
    secretFilestringstringsecretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
    More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
    +optional
    secretRefLocalObjectReferenceLocalObjectReference
    userstringstringuser is optional: User is the rados user name, default is admin
    More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
    +optional
    -

    CinderVolumeSource

    -
    -

    A Cinder volume must exist before mounting to a container. -The volume must also be in the same region as the kubelet. -Cinder volumes support ownership management and SELinux relabeling.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    fsTypestringstringfsType is the filesystem type to mount.
    Must be a filesystem type supported by the host operating system.
    Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
    More info: https://examples.k8s.io/mysql-cinder-pd/README.md
    +optional
    readOnlybooleanboolreadOnly defaults to false (read/write). ReadOnly here will force
    the ReadOnly setting in VolumeMounts.
    More info: https://examples.k8s.io/mysql-cinder-pd/README.md
    +optional
    secretRefLocalObjectReferenceLocalObjectReference
    volumeIDstringstringvolumeID used to identify the volume in cinder.
    More info: https://examples.k8s.io/mysql-cinder-pd/README.md
    -

    ClientCertAuth

    -
    -

    ClientCertAuth holds necessary information for client authentication via certificates

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    clientCertSecretSecretKeySelectorSecretKeySelector
    clientKeySecretSecretKeySelectorSecretKeySelector
    -

    ConfigMapEnvSource

    -
    -

    The contents of the target ConfigMap's Data field will represent the -key-value pairs as environment variables.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    namestringstringName of the referent.
    More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
    TODO: Add other useful fields. apiVersion, kind, uid?
    +optional
    optionalbooleanboolSpecify whether the ConfigMap must be defined
    +optional
    -

    ConfigMapKeySelector

    -
    -

    +structType=atomic

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    keystringstringThe key to select.
    namestringstringName of the referent.
    More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
    TODO: Add other useful fields. apiVersion, kind, uid?
    +optional
    optionalbooleanboolSpecify whether the ConfigMap or its key must be defined
    +optional
    -

    ConfigMapProjection

    -
    -

    The contents of the target ConfigMap's Data field will be presented in a -projected volume as files using the keys in the Data field as the file names, -unless the items element is populated with specific mappings of keys to paths. -Note that this is identical to a configmap volume source without the default -mode.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    items[]KeyToPath[]*KeyToPathitems if unspecified, each key-value pair in the Data field of the referenced
    ConfigMap will be projected into the volume as a file whose name is the
    key and content is the value. If specified, the listed keys will be
    projected into the specified paths, and unlisted keys will not be
    present. If a key is specified which is not present in the ConfigMap,
    the volume setup will error unless it is marked optional. Paths must be
    relative and may not contain the '..' path or start with '..'.
    +optional
    namestringstringName of the referent.
    More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
    TODO: Add other useful fields. apiVersion, kind, uid?
    +optional
    optionalbooleanbooloptional specify whether the ConfigMap or its keys must be defined
    +optional
    -

    ConfigMapVolumeSource

    -
    -

    The contents of the target ConfigMap's Data field will be presented in a -volume as files using the keys in the Data field as the file names, unless -the items element is populated with specific mappings of keys to paths. -ConfigMap volumes support ownership management and SELinux relabeling.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    defaultModeint32 (formatted integer)int32defaultMode is optional: mode bits used to set permissions on created files by default.
    Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
    YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
    Defaults to 0644.
    Directories within the path are not affected by this setting.
    This might be in conflict with other options that affect the file
    mode, like fsGroup, and the result can be other mode bits set.
    +optional
    items[]KeyToPath[]*KeyToPathitems if unspecified, each key-value pair in the Data field of the referenced
    ConfigMap will be projected into the volume as a file whose name is the
    key and content is the value. If specified, the listed keys will be
    projected into the specified paths, and unlisted keys will not be
    present. If a key is specified which is not present in the ConfigMap,
    the volume setup will error unless it is marked optional. Paths must be
    relative and may not contain the '..' path or start with '..'.
    +optional
    namestringstringName of the referent.
    More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
    TODO: Add other useful fields. apiVersion, kind, uid?
    +optional
    optionalbooleanbooloptional specify whether the ConfigMap or its keys must be defined
    +optional
    -

    Container

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    args[]string[]stringArguments to the entrypoint.
    The container image's CMD is used if this is not provided.
    Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
    cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
    to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
    produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
    of whether the variable exists or not. Cannot be updated.
    More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
    +optional
    command[]string[]stringEntrypoint array. Not executed within a shell.
    The container image's ENTRYPOINT is used if this is not provided.
    Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
    cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
    to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
    produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
    of whether the variable exists or not. Cannot be updated.
    More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
    +optional
    env[]EnvVar[]*EnvVarList of environment variables to set in the container.
    Cannot be updated.
    +optional
    +patchMergeKey=name
    +patchStrategy=merge
    envFrom[]EnvFromSource[]*EnvFromSourceList of sources to populate environment variables in the container.
    The keys defined within a source must be a C_IDENTIFIER. All invalid keys
    will be reported as an event when the container is starting. When a key exists in multiple
    sources, the value associated with the last source will take precedence.
    Values defined by an Env with a duplicate key will take precedence.
    Cannot be updated.
    +optional
    imagestringstringContainer image name.
    More info: https://kubernetes.io/docs/concepts/containers/images
    This field is optional to allow higher level config management to default or override
    container images in workload controllers like Deployments and StatefulSets.
    +optional
    imagePullPolicyPullPolicyPullPolicy
    lifecycleLifecycleLifecycle
    livenessProbeProbeProbe
    namestringstringName of the container specified as a DNS_LABEL.
    Each container in a pod must have a unique name (DNS_LABEL).
    Cannot be updated.
    ports[]ContainerPort[]*ContainerPortList of ports to expose from the container. Exposing a port here gives
    the system additional information about the network connections a
    container uses, but is primarily informational. Not specifying a port here
    DOES NOT prevent that port from being exposed. Any port which is
    listening on the default "0.0.0.0" address inside a container will be
    accessible from the network.
    Cannot be updated.
    +optional
    +patchMergeKey=containerPort
    +patchStrategy=merge
    +listType=map
    +listMapKey=containerPort
    +listMapKey=protocol
    readinessProbeProbeProbe
    resourcesResourceRequirementsResourceRequirements
    securityContextSecurityContextSecurityContext
    startupProbeProbeProbe
    stdinbooleanboolWhether this container should allocate a buffer for stdin in the container runtime. If this
    is not set, reads from stdin in the container will always result in EOF.
    Default is false.
    +optional
    stdinOncebooleanboolWhether the container runtime should close the stdin channel after it has been opened by
    a single attach. When stdin is true the stdin stream will remain open across multiple attach
    sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
    first client attaches to stdin, and then remains open and accepts data until the client disconnects,
    at which time stdin is closed and remains closed until the container is restarted. If this
    flag is false, a container processes that reads from stdin will never receive an EOF.
    Default is false
    +optional
    terminationMessagePathstringstringOptional: Path at which the file to which the container's termination message
    will be written is mounted into the container's filesystem.
    Message written is intended to be brief final status, such as an assertion failure message.
    Will be truncated by the node if greater than 4096 bytes. The total message length across
    all containers will be limited to 12kb.
    Defaults to /dev/termination-log.
    Cannot be updated.
    +optional
    terminationMessagePolicyTerminationMessagePolicyTerminationMessagePolicy
    ttybooleanboolWhether this container should allocate a TTY for itself, also requires 'stdin' to be true.
    Default is false.
    +optional
    volumeDevices[]VolumeDevice[]*VolumeDevicevolumeDevices is the list of block devices to be used by the container.
    +patchMergeKey=devicePath
    +patchStrategy=merge
    +optional
    volumeMounts[]VolumeMount[]*VolumeMountPod volumes to mount into the container's filesystem.
    Cannot be updated.
    +optional
    +patchMergeKey=mountPath
    +patchStrategy=merge
    workingDirstringstringContainer's working directory.
    If not specified, the container runtime's default will be used, which
    might be configured in the container image.
    Cannot be updated.
    +optional
    -

    ContainerNode

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    args[]string[]stringArguments to the entrypoint.
    The container image's CMD is used if this is not provided.
    Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
    cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
    to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
    produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
    of whether the variable exists or not. Cannot be updated.
    More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
    +optional
    command[]string[]stringEntrypoint array. Not executed within a shell.
    The container image's ENTRYPOINT is used if this is not provided.
    Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
    cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
    to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
    produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
    of whether the variable exists or not. Cannot be updated.
    More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
    +optional
    dependencies[]string[]string
    env[]EnvVar[]*EnvVarList of environment variables to set in the container.
    Cannot be updated.
    +optional
    +patchMergeKey=name
    +patchStrategy=merge
    envFrom[]EnvFromSource[]*EnvFromSourceList of sources to populate environment variables in the container.
    The keys defined within a source must be a C_IDENTIFIER. All invalid keys
    will be reported as an event when the container is starting. When a key exists in multiple
    sources, the value associated with the last source will take precedence.
    Values defined by an Env with a duplicate key will take precedence.
    Cannot be updated.
    +optional
    imagestringstringContainer image name.
    More info: https://kubernetes.io/docs/concepts/containers/images
    This field is optional to allow higher level config management to default or override
    container images in workload controllers like Deployments and StatefulSets.
    +optional
    imagePullPolicyPullPolicyPullPolicy
    lifecycleLifecycleLifecycle
    livenessProbeProbeProbe
    namestringstringName of the container specified as a DNS_LABEL.
    Each container in a pod must have a unique name (DNS_LABEL).
    Cannot be updated.
    ports[]ContainerPort[]*ContainerPortList of ports to expose from the container. Exposing a port here gives
    the system additional information about the network connections a
    container uses, but is primarily informational. Not specifying a port here
    DOES NOT prevent that port from being exposed. Any port which is
    listening on the default "0.0.0.0" address inside a container will be
    accessible from the network.
    Cannot be updated.
    +optional
    +patchMergeKey=containerPort
    +patchStrategy=merge
    +listType=map
    +listMapKey=containerPort
    +listMapKey=protocol
    readinessProbeProbeProbe
    resourcesResourceRequirementsResourceRequirements
    securityContextSecurityContextSecurityContext
    startupProbeProbeProbe
    stdinbooleanboolWhether this container should allocate a buffer for stdin in the container runtime. If this
    is not set, reads from stdin in the container will always result in EOF.
    Default is false.
    +optional
    stdinOncebooleanboolWhether the container runtime should close the stdin channel after it has been opened by
    a single attach. When stdin is true the stdin stream will remain open across multiple attach
    sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
    first client attaches to stdin, and then remains open and accepts data until the client disconnects,
    at which time stdin is closed and remains closed until the container is restarted. If this
    flag is false, a container processes that reads from stdin will never receive an EOF.
    Default is false
    +optional
    terminationMessagePathstringstringOptional: Path at which the file to which the container's termination message
    will be written is mounted into the container's filesystem.
    Message written is intended to be brief final status, such as an assertion failure message.
    Will be truncated by the node if greater than 4096 bytes. The total message length across
    all containers will be limited to 12kb.
    Defaults to /dev/termination-log.
    Cannot be updated.
    +optional
    terminationMessagePolicyTerminationMessagePolicyTerminationMessagePolicy
    ttybooleanboolWhether this container should allocate a TTY for itself, also requires 'stdin' to be true.
    Default is false.
    +optional
    volumeDevices[]VolumeDevice[]*VolumeDevicevolumeDevices is the list of block devices to be used by the container.
    +patchMergeKey=devicePath
    +patchStrategy=merge
    +optional
    volumeMounts[]VolumeMount[]*VolumeMountPod volumes to mount into the container's filesystem.
    Cannot be updated.
    +optional
    +patchMergeKey=mountPath
    +patchStrategy=merge
    workingDirstringstringContainer's working directory.
    If not specified, the container runtime's default will be used, which
    might be configured in the container image.
    Cannot be updated.
    +optional
    -

    ContainerPort

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    containerPortint32 (formatted integer)int32Number of port to expose on the pod's IP address.
    This must be a valid port number, 0 < x < 65536.
    hostIPstringstringWhat host IP to bind the external port to.
    +optional
    hostPortint32 (formatted integer)int32Number of port to expose on the host.
    If specified, this must be a valid port number, 0 < x < 65536.
    If HostNetwork is specified, this must match ContainerPort.
    Most containers do not need this.
    +optional
    namestringstringIf specified, this must be an IANA_SVC_NAME and unique within the pod. Each
    named port in a pod must have a unique name. Name for the port that can be
    referred to by services.
    +optional
    protocolProtocolProtocol
    -

    ContainerSetRetryStrategy

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    durationstringstringDuration is the time between each retry, examples values are "300ms", "1s" or "5m".
    Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
    retriesIntOrStringIntOrString
    -

    ContainerSetTemplate

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    containers[]ContainerNode[]*ContainerNode
    retryStrategyContainerSetRetryStrategyContainerSetRetryStrategy
    volumeMounts[]VolumeMount[]*VolumeMount
    -

    ContinueOn

    -
    -

    It can be specified if the workflow should continue when the pod errors, fails or both.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    errorbooleanbool+optional
    failedbooleanbool+optional
    -

    Counter

    -
    -

    Counter is a Counter prometheus metric

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    valuestringstringValue is the value of the metric
    -

    CreateS3BucketOptions

    -
    -

    CreateS3BucketOptions options used to determine the automatic bucket-creation process

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    objectLockingbooleanboolObjectLocking Enable object locking
    -

    DAGTask

    -
    -

    DAGTask represents a node in the graph during DAG execution

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    argumentsArgumentsArguments
    continueOnContinueOnContinueOn
    dependencies[]string[]stringDependencies are name of other targets which this depends on
    dependsstringstringDepends are name of other targets which this depends on
    hooksLifecycleHooksLifecycleHooks
    inlineTemplateTemplate
    namestringstringName is the name of the target
    onExitstringstringOnExit is a template reference which is invoked at the end of the
    template, irrespective of the success, failure, or error of the
    primary template.
    DEPRECATED: Use Hooks[exit].Template instead.
    templatestringstringName of template to execute
    templateRefTemplateRefTemplateRef
    whenstringstringWhen is an expression in which the task should conditionally execute
    withItems[]Item[]ItemWithItems expands a task into multiple parallel tasks from the items in the list
    withParamstringstringWithParam expands a task into multiple parallel tasks from the value in the parameter,
    which is expected to be a JSON list.
    withSequenceSequenceSequence
    -

    DAGTemplate

    -
    -

    DAGTemplate is a template subtype for directed acyclic graph templates

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    failFastbooleanboolThis flag is for DAG logic. The DAG logic has a built-in "fail fast" feature to stop scheduling new steps,
    as soon as it detects that one of the DAG nodes is failed. Then it waits until all DAG nodes are completed
    before failing the DAG itself.
    The FailFast flag default is true, if set to false, it will allow a DAG to run all branches of the DAG to
    completion (either success or failure), regardless of the failed outcomes of branches in the DAG.
    More info and example about this feature at https://github.com/argoproj/argo-workflows/issues/1442
    targetstringstringTarget are one or more names of targets to execute in a DAG
    tasks[]DAGTask[]*DAGTaskTasks are a list of DAG tasks
    +patchStrategy=merge
    +patchMergeKey=name
    -

    Data

    -
    -

    Data is a data template

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    sourceDataSourceDataSource
    transformationTransformationTransformation
    -

    DataSource

    -
    -

    DataSource sources external data into a data template

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    artifactPathsArtifactPathsArtifactPaths
    -

    DownwardAPIProjection

    -
    -

    Note that this is identical to a downwardAPI volume source without the default mode.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    items[]DownwardAPIVolumeFile[]*DownwardAPIVolumeFileItems is a list of DownwardAPIVolume file
    +optional
    -

    DownwardAPIVolumeFile

    -
    -

    DownwardAPIVolumeFile represents information to create the file containing the pod field

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    fieldRefObjectFieldSelectorObjectFieldSelector
    modeint32 (formatted integer)int32Optional: mode bits used to set permissions on this file, must be an octal value
    between 0000 and 0777 or a decimal value between 0 and 511.
    YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
    If not specified, the volume defaultMode will be used.
    This might be in conflict with other options that affect the file
    mode, like fsGroup, and the result can be other mode bits set.
    +optional
    pathstringstringRequired: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
    resourceFieldRefResourceFieldSelectorResourceFieldSelector
    -

    DownwardAPIVolumeSource

    -
    -

    Downward API volumes support ownership management and SELinux relabeling.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    defaultModeint32 (formatted integer)int32Optional: mode bits to use on created files by default. Must be a
    Optional: mode bits used to set permissions on created files by default.
    Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
    YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
    Defaults to 0644.
    Directories within the path are not affected by this setting.
    This might be in conflict with other options that affect the file
    mode, like fsGroup, and the result can be other mode bits set.
    +optional
    items[]DownwardAPIVolumeFile[]*DownwardAPIVolumeFileItems is a list of downward API volume file
    +optional
    -

    Duration

    -
    -

    Duration is a wrapper around time.Duration which supports correct marshaling to YAML and JSON. In particular, it marshals into strings, which can be used as map keys in json.

    -
    -

    interface{}

    -

    EmptyDirVolumeSource

    -
    -

    Empty directory volumes support ownership management and SELinux relabeling.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    mediumStorageMediumStorageMedium
    sizeLimitQuantityQuantity
    -

    EnvFromSource

    -
    -

    EnvFromSource represents the source of a set of ConfigMaps

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    configMapRefConfigMapEnvSourceConfigMapEnvSource
    prefixstringstringAn optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
    +optional
    secretRefSecretEnvSourceSecretEnvSource
    -

    EnvVar

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    namestringstringName of the environment variable. Must be a C_IDENTIFIER.
    valuestringstringVariable references $(VAR_NAME) are expanded
    using the previously defined environment variables in the container and
    any service environment variables. If a variable cannot be resolved,
    the reference in the input string will be unchanged. Double $$ are reduced
    to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
    "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
    Escaped references will never be expanded, regardless of whether the variable
    exists or not.
    Defaults to "".
    +optional
    valueFromEnvVarSourceEnvVarSource
    -

    EnvVarSource

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    configMapKeyRefConfigMapKeySelectorConfigMapKeySelector
    fieldRefObjectFieldSelectorObjectFieldSelector
    resourceFieldRefResourceFieldSelectorResourceFieldSelector
    secretKeyRefSecretKeySelectorSecretKeySelector
    -

    EphemeralVolumeSource

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    volumeClaimTemplatePersistentVolumeClaimTemplatePersistentVolumeClaimTemplate
    -

    ExecAction

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    command[]string[]stringCommand is the command line to execute inside the container, the working directory for the
    command is root ('/') in the container's filesystem. The command is simply exec'd, it is
    not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
    a shell, you need to explicitly call out to that shell.
    Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
    +optional
    -

    ExecuteTemplateArgs

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    templateTemplateTemplate
    workflowWorkflowWorkflow
    -

    ExecuteTemplateReply

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    nodeNodeResultNodeResult
    requeueDurationDuration
    -

    ExecutorConfig

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    serviceAccountNamestringstringServiceAccountName specifies the service account name of the executor container.
    -

    FCVolumeSource

    -
    -

    Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    fsTypestringstringfsType is the filesystem type to mount.
    Must be a filesystem type supported by the host operating system.
    Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
    TODO: how do we prevent errors in the filesystem from compromising the machine
    +optional
    lunint32 (formatted integer)int32lun is Optional: FC target lun number
    +optional
    readOnlybooleanboolreadOnly is Optional: Defaults to false (read/write). ReadOnly here will force
    the ReadOnly setting in VolumeMounts.
    +optional
    targetWWNs[]string[]stringtargetWWNs is Optional: FC target worldwide names (WWNs)
    +optional
    wwids[]string[]stringwwids Optional: FC volume world wide identifiers (wwids)
    Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
    +optional
    -

    FieldsV1

    -
    -

    Each key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:&lt;name&gt;', where &lt;name&gt; is the name of a field in a struct, or key in a map; 'v:&lt;value&gt;', where &lt;value&gt; is the exact json formatted value of a list item; 'i:&lt;index&gt;', where &lt;index&gt; is the position of an item in a list; 'k:&lt;keys&gt;', where &lt;keys&gt; is a map of a list item's key fields to their unique values. If a key maps to an empty Fields value, the field that key represents is part of the set.

    -
    -

    The exact format is defined in sigs.k8s.io/structured-merge-diff. +protobuf.options.(gogoproto.goproto_stringer)=false

    -

    interface{}

    -

    FlexVolumeSource

    -
    -

    FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    driverstringstringdriver is the name of the driver to use for this volume.
    fsTypestringstringfsType is the filesystem type to mount.
    Must be a filesystem type supported by the host operating system.
    Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
    +optional
    optionsmap of stringmap[string]stringoptions is Optional: this field holds extra command options if any.
    +optional
    readOnlybooleanboolreadOnly is Optional: defaults to false (read/write). ReadOnly here will force
    the ReadOnly setting in VolumeMounts.
    +optional
    secretRefLocalObjectReferenceLocalObjectReference
    -

    FlockerVolumeSource

    -
    -

    One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    datasetNamestringstringdatasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker
    should be considered as deprecated
    +optional
    datasetUUIDstringstringdatasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset
    +optional
    -

    GCEPersistentDiskVolumeSource

    -
    -

    A GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    fsTypestringstringfsType is filesystem type of the volume that you want to mount.
    Tip: Ensure that the filesystem type is supported by the host operating system.
    Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
    More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
    TODO: how do we prevent errors in the filesystem from compromising the machine
    +optional
    partitionint32 (formatted integer)int32partition is the partition in the volume that you want to mount.
    If omitted, the default is to mount by volume name.
    Examples: For volume /dev/sda1, you specify the partition as "1".
    Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
    More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
    +optional
    pdNamestringstringpdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.
    More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
    readOnlybooleanboolreadOnly here will force the ReadOnly setting in VolumeMounts.
    Defaults to false.
    More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
    +optional
    -

    GCSArtifact

    -
    -

    GCSArtifact is the location of a GCS artifact

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    bucketstringstringBucket is the name of the bucket
    keystringstringKey is the path in the bucket where the artifact resides
    serviceAccountKeySecretSecretKeySelectorSecretKeySelector
    -

    GRPCAction

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    portint32 (formatted integer)int32Port number of the gRPC service. Number must be in the range 1 to 65535.
    servicestringstringService is the name of the service to place in the gRPC HealthCheckRequest
    (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
    -

    If this is not specified, the default behavior is defined by gRPC. +optional +default=""

    -

    Gauge

    -
    -

    Gauge is a Gauge prometheus metric

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    operationGaugeOperationGaugeOperation
    realtimebooleanboolRealtime emits this metric in real time if applicable
    valuestringstringValue is the value to be used in the operation with the metric's current value. If no operation is set,
    value is the value of the metric
    -

    GaugeOperation

    - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    GaugeOperationstringstring
    -

    GitArtifact

    -
    -

    GitArtifact is the location of a git artifact

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    branchstringstringBranch is the branch to fetch when SingleBranch is enabled
    depthuint64 (formatted integer)uint64Depth specifies clones/fetches should be shallow and include the given
    number of commits from the branch tip
    disableSubmodulesbooleanboolDisableSubmodules disables submodules during git clone
    fetch[]string[]stringFetch specifies a number of refs that should be fetched before checkout
    insecureIgnoreHostKeybooleanboolInsecureIgnoreHostKey disables SSH strict host key checking during git clone
    passwordSecretSecretKeySelectorSecretKeySelector
    repostringstringRepo is the git repository
    revisionstringstringRevision is the git commit, tag, branch to checkout
    singleBranchbooleanboolSingleBranch enables single branch clone, using the branch parameter
    sshPrivateKeySecretSecretKeySelectorSecretKeySelector
    usernameSecretSecretKeySelectorSecretKeySelector
    -

    GitRepoVolumeSource

    -
    -

    DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    directorystringstringdirectory is the target directory name.
    Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
    git repository. Otherwise, if specified, the volume will contain the git repository in
    the subdirectory with the given name.
    +optional
    repositorystringstringrepository is the URL
    revisionstringstringrevision is the commit hash for the specified revision.
    +optional
    -

    GlusterfsVolumeSource

    -
    -

    Glusterfs volumes do not support ownership management or SELinux relabeling.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    endpointsstringstringendpoints is the endpoint name that details Glusterfs topology.
    More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
    pathstringstringpath is the Glusterfs volume path.
    More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
    readOnlybooleanboolreadOnly here will force the Glusterfs volume to be mounted with read-only permissions.
    Defaults to false.
    More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
    +optional
    -

    HDFSArtifact

    -
    -

    HDFSArtifact is the location of an HDFS artifact

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    addresses[]string[]stringAddresses is accessible addresses of HDFS name nodes
    forcebooleanboolForce copies a file forcibly even if it exists
    hdfsUserstringstringHDFSUser is the user to access HDFS file system.
    It is ignored if either ccache or keytab is used.
    krbCCacheSecretSecretKeySelectorSecretKeySelector
    krbConfigConfigMapConfigMapKeySelectorConfigMapKeySelector
    krbKeytabSecretSecretKeySelectorSecretKeySelector
    krbRealmstringstringKrbRealm is the Kerberos realm used with Kerberos keytab
    It must be set if keytab is used.
    krbServicePrincipalNamestringstringKrbServicePrincipalName is the principal name of Kerberos service
    It must be set if either ccache or keytab is used.
    krbUsernamestringstringKrbUsername is the Kerberos username used with Kerberos keytab
    It must be set if keytab is used.
    pathstringstringPath is a file path in HDFS
    -

    HTTP

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    bodystringstringBody is content of the HTTP Request
    bodyFromHTTPBodySourceHTTPBodySource
    headersHTTPHeadersHTTPHeaders
    insecureSkipVerifybooleanboolInsecureSkipVerify is a bool when if set to true will skip TLS verification for the HTTP client
    methodstringstringMethod is HTTP methods for HTTP Request
    successConditionstringstringSuccessCondition is an expression if evaluated to true is considered successful
    timeoutSecondsint64 (formatted integer)int64TimeoutSeconds is request timeout for HTTP Request. Default is 30 seconds
    urlstringstringURL of the HTTP Request
    -

    HTTPArtifact

    -
    -

    HTTPArtifact allows a file served on HTTP to be placed as an input artifact in a container

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    authHTTPAuthHTTPAuth
    headers[]Header[]*HeaderHeaders are an optional list of headers to send with HTTP requests for artifacts
    urlstringstringURL of the artifact
    -

    HTTPAuth

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    basicAuthBasicAuthBasicAuth
    clientCertClientCertAuthClientCertAuth
    oauth2OAuth2AuthOAuth2Auth
    -

    HTTPBodySource

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    bytes[]uint8 (formatted integer)[]uint8
    -

    HTTPGetAction

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    hoststringstringHost name to connect to, defaults to the pod IP. You probably want to set
    "Host" in httpHeaders instead.
    +optional
    httpHeaders[]HTTPHeader[]*HTTPHeaderCustom headers to set in the request. HTTP allows repeated headers.
    +optional
    pathstringstringPath to access on the HTTP server.
    +optional
    portIntOrStringIntOrString
    schemeURISchemeURIScheme
    -

    HTTPHeader

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    namestringstring
    valuestringstring
    valueFromHTTPHeaderSourceHTTPHeaderSource
    -

    HTTPHeaderSource

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    secretKeyRefSecretKeySelectorSecretKeySelector
    -

    HTTPHeaders

    -

    []HTTPHeader

    - -
    -

    Header indicate a key-value request header to be used when fetching artifacts over HTTP

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    namestringstringName is the header name
    valuestringstringValue is the literal value to use for the header
    -

    Histogram

    -
    -

    Histogram is a Histogram prometheus metric

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    buckets[]Amount[]AmountBuckets is a list of bucket divisors for the histogram
    valuestringstringValue is the value of the metric
    -

    HostAlias

    -
    -

    HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    hostnames[]string[]stringHostnames for the above IP address.
    ipstringstringIP address of the host file entry.
    -

    HostPathType

    -
    -

    +enum

    -
    - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    HostPathTypestringstring+enum
    -

    HostPathVolumeSource

    -
    -

    Host path volumes do not support ownership management or SELinux relabeling.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    pathstringstringpath of the directory on the host.
    If the path is a symlink, it will follow the link to the real path.
    More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
    typeHostPathTypeHostPathType
    -

    ISCSIVolumeSource

    -
    -

    ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    chapAuthDiscoverybooleanboolchapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication
    +optional
    chapAuthSessionbooleanboolchapAuthSession defines whether support iSCSI Session CHAP authentication
    +optional
    fsTypestringstringfsType is the filesystem type of the volume that you want to mount.
    Tip: Ensure that the filesystem type is supported by the host operating system.
    Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
    More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
    TODO: how do we prevent errors in the filesystem from compromising the machine
    +optional
    initiatorNamestringstringinitiatorName is the custom iSCSI Initiator Name.
    If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
    : will be created for the connection.
    +optional
    iqnstringstringiqn is the target iSCSI Qualified Name.
    iscsiInterfacestringstringiscsiInterface is the interface Name that uses an iSCSI transport.
    Defaults to 'default' (tcp).
    +optional
    lunint32 (formatted integer)int32lun represents iSCSI Target Lun number.
    portals[]string[]stringportals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
    is other than default (typically TCP ports 860 and 3260).
    +optional
    readOnlybooleanboolreadOnly here will force the ReadOnly setting in VolumeMounts.
    Defaults to false.
    +optional
    secretRefLocalObjectReferenceLocalObjectReference
    targetPortalstringstringtargetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
    is other than default (typically TCP ports 860 and 3260).
    -

    Inputs

    -
    -

    Inputs are the mechanism for passing parameters, artifacts, volumes from one template to another

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    artifactsArtifactsArtifacts
    parameters[]Parameter[]*ParameterParameters are a list of parameters passed as inputs
    +patchStrategy=merge
    +patchMergeKey=name
    -

    IntOrString

    -
    -

    +protobuf=true +protobuf.options.(gogoproto.goproto_stringer)=false +k8s:openapi-gen=true

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    IntValint32 (formatted integer)int32
    StrValstringstring
    TypeTypeType
    -

    Item

    -
    -

    +protobuf.options.(gogoproto.goproto_stringer)=false +kubebuilder:validation:Type=object

    -
    -

    interface{}

    -

    KeyToPath

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    keystringstringkey is the key to project.
    modeint32 (formatted integer)int32mode is Optional: mode bits used to set permissions on this file.
    Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
    YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
    If not specified, the volume defaultMode will be used.
    This might be in conflict with other options that affect the file
    mode, like fsGroup, and the result can be other mode bits set.
    +optional
    pathstringstringpath is the relative path of the file to map the key to.
    May not be an absolute path.
    May not contain the path element '..'.
    May not start with the string '..'.
    -

    LabelSelector

    -
    -

    A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. +structType=atomic

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    matchExpressions[]LabelSelectorRequirement[]*LabelSelectorRequirementmatchExpressions is a list of label selector requirements. The requirements are ANDed.
    +optional
    matchLabelsmap of stringmap[string]stringmatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
    map is equivalent to an element of matchExpressions, whose key field is "key", the
    operator is "In", and the values array contains only "value". The requirements are ANDed.
    +optional
    -

    LabelSelectorOperator

    - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    LabelSelectorOperatorstringstring
    -

    LabelSelectorRequirement

    -
    -

    A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    keystringstringkey is the label key that the selector applies to.
    +patchMergeKey=key
    +patchStrategy=merge
    operatorLabelSelectorOperatorLabelSelectorOperator
    values[]string[]stringvalues is an array of string values. If the operator is In or NotIn,
    the values array must be non-empty. If the operator is Exists or DoesNotExist,
    the values array must be empty. This array is replaced during a strategic
    merge patch.
    +optional
    -

    Lifecycle

    -
    -

    Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    postStartLifecycleHandlerLifecycleHandler
    preStopLifecycleHandlerLifecycleHandler
    -

    LifecycleHandler

    -
    -

    LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    execExecActionExecAction
    httpGetHTTPGetActionHTTPGetAction
    tcpSocketTCPSocketActionTCPSocketAction
    -

    LifecycleHook

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    argumentsArgumentsArguments
    expressionstringstringExpression is a condition expression for when a node will be retried. If it evaluates to false, the node will not
    be retried and the retry strategy will be ignored
    templatestringstringTemplate is the name of the template to execute by the hook
    templateRefTemplateRefTemplateRef
    -

    LifecycleHooks

    -

    LifecycleHooks

    -

    LocalObjectReference

    -
    -

    LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. +structType=atomic

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    namestringstringName of the referent.
    More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
    TODO: Add other useful fields. apiVersion, kind, uid?
    +optional
    -

    ManagedFieldsEntry

    -
    -

    ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    apiVersionstringstringAPIVersion defines the version of this resource that this field set
    applies to. The format is "group/version" just like the top-level
    APIVersion field. It is necessary to track the version of a field
    set because it cannot be automatically converted.
    fieldsTypestringstringFieldsType is the discriminator for the different fields format and version.
    There is currently only one possible value: "FieldsV1"
    fieldsV1FieldsV1FieldsV1
    managerstringstringManager is an identifier of the workflow managing these fields.
    operationManagedFieldsOperationTypeManagedFieldsOperationType
    subresourcestringstringSubresource is the name of the subresource used to update that object, or
    empty string if the object was updated through the main resource. The
    value of this field is used to distinguish between managers, even if they
    share the same name. For example, a status update will be distinct from a
    regular update using the same manager name.
    Note that the APIVersion field is not related to the Subresource field and
    it always corresponds to the version of the main resource.
    timeTimeTime
    -

    ManagedFieldsOperationType

    - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    ManagedFieldsOperationTypestringstring
    -

    ManifestFrom

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    artifactArtifactArtifact
    -

    Memoize

    -
    -

    Memoization enables caching for the Outputs of the template

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    cacheCacheCache
    keystringstringKey is the key to use as the caching key
    maxAgestringstringMaxAge is the maximum age (e.g. "180s", "24h") of an entry that is still considered valid. If an entry is older
    than the MaxAge, it will be ignored.
    -

    Metadata

    -
    -

    Pod metadata

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    annotationsmap of stringmap[string]string
    labelsmap of stringmap[string]string
    -

    MetricLabel

    -
    -

    MetricLabel is a single label for a prometheus metric

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    keystringstring
    valuestringstring
    -

    Metrics

    -
    -

    Metrics are a list of metrics emitted from a Workflow/Template

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    prometheus[]Prometheus[]*PrometheusPrometheus is a list of prometheus metrics to be emitted
    -

    MountPropagationMode

    -
    -

    +enum

    -
    - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    MountPropagationModestringstring+enum
    -

    Mutex

    -
    -

    Mutex holds Mutex configuration

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    namestringstringname of the mutex
    namespacestringstring"[namespace of workflow]"
    -

    NFSVolumeSource

    -
    -

    NFS volumes do not support ownership management or SELinux relabeling.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    pathstringstringpath that is exported by the NFS server.
    More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
    readOnlybooleanboolreadOnly here will force the NFS export to be mounted with read-only permissions.
    Defaults to false.
    More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
    +optional
    serverstringstringserver is the hostname or IP address of the NFS server.
    More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
    -

    NodeAffinity

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    preferredDuringSchedulingIgnoredDuringExecution[]PreferredSchedulingTerm[]*PreferredSchedulingTermThe scheduler will prefer to schedule pods to nodes that satisfy
    the affinity expressions specified by this field, but it may choose
    a node that violates one or more of the expressions. The node that is
    most preferred is the one with the greatest sum of weights, i.e.
    for each node that meets all of the scheduling requirements (resource
    request, requiredDuringScheduling affinity expressions, etc.),
    compute a sum by iterating through the elements of this field and adding
    "weight" to the sum if the node matches the corresponding matchExpressions; the
    node(s) with the highest sum are the most preferred.
    +optional
    requiredDuringSchedulingIgnoredDuringExecutionNodeSelectorNodeSelector
    -

    NodePhase

    - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    NodePhasestringstring
    -

    NodeResult

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    messagestringstring
    outputsOutputsOutputs
    phaseNodePhaseNodePhase
    progressProgressProgress
    -

    NodeSelector

    -
    -

    A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms. +structType=atomic

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    nodeSelectorTerms[]NodeSelectorTerm[]*NodeSelectorTermRequired. A list of node selector terms. The terms are ORed.
    -

    NodeSelectorOperator

    -
    -

    A node selector operator is the set of operators that can be used in a node selector requirement. +enum

    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    NodeSelectorOperatorstringstringA node selector operator is the set of operators that can be used in
    a node selector requirement.
    +enum
    -

    NodeSelectorRequirement

    -
    -

    A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    keystringstringThe label key that the selector applies to.
    operatorNodeSelectorOperatorNodeSelectorOperator
    values[]string[]stringAn array of string values. If the operator is In or NotIn,
    the values array must be non-empty. If the operator is Exists or DoesNotExist,
    the values array must be empty. If the operator is Gt or Lt, the values
    array must have a single element, which will be interpreted as an integer.
    This array is replaced during a strategic merge patch.
    +optional
    -

    NodeSelectorTerm

    -
    -

    A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. +structType=atomic

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    matchExpressions[]NodeSelectorRequirement[]*NodeSelectorRequirementA list of node selector requirements by node's labels.
    +optional
    matchFields[]NodeSelectorRequirement[]*NodeSelectorRequirementA list of node selector requirements by node's fields.
    +optional
    -

    NoneStrategy

    -
    -

    NoneStrategy indicates to skip tar process and upload the files or directory tree as independent files. Note that if the artifact is a directory, the artifact driver must support the ability to save/load the directory appropriately.

    -
    -

    interface{}

    -

    OAuth2Auth

    -
    -

    OAuth2Auth holds all information for client authentication via OAuth2 tokens

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    clientIDSecretSecretKeySelectorSecretKeySelector
    clientSecretSecretSecretKeySelectorSecretKeySelector
    endpointParams[]OAuth2EndpointParam[]*OAuth2EndpointParam
    scopes[]string[]string
    tokenURLSecretSecretKeySelectorSecretKeySelector
    -

    OAuth2EndpointParam

    -
    -

    EndpointParam is for requesting optional fields that should be sent in the oauth request

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    keystringstringName is the header name
    valuestringstringValue is the literal value to use for the header
    -

    OSSArtifact

    -
    -

    OSSArtifact is the location of an Alibaba Cloud OSS artifact

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    accessKeySecretSecretKeySelectorSecretKeySelector
    bucketstringstringBucket is the name of the bucket
    createBucketIfNotPresentbooleanboolCreateBucketIfNotPresent tells the driver to attempt to create the OSS bucket for output artifacts, if it doesn't exist
    endpointstringstringEndpoint is the hostname of the bucket endpoint
    keystringstringKey is the path in the bucket where the artifact resides
    lifecycleRuleOSSLifecycleRuleOSSLifecycleRule
    secretKeySecretSecretKeySelectorSecretKeySelector
    securityTokenstringstringSecurityToken is the user's temporary security token. For more details, check out: https://www.alibabacloud.com/help/doc-detail/100624.htm
    useSDKCredsbooleanboolUseSDKCreds tells the driver to figure out credentials based on sdk defaults.
    -

    OSSLifecycleRule

    -
    -

    OSSLifecycleRule specifies how to manage bucket's lifecycle

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    markDeletionAfterDaysint32 (formatted integer)int32MarkDeletionAfterDays is the number of days before we delete objects in the bucket
    markInfrequentAccessAfterDaysint32 (formatted integer)int32MarkInfrequentAccessAfterDays is the number of days before we convert the objects in the bucket to Infrequent Access (IA) storage type
    -

    ObjectFieldSelector

    -
    -

    +structType=atomic

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    apiVersionstringstringVersion of the schema the FieldPath is written in terms of, defaults to "v1".
    +optional
    fieldPathstringstringPath of the field to select in the specified API version.
    -

    ObjectMeta

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    namestringstring
    namespacestringstring
    uidstringstring
    -

    Outputs

    -
    -

    Outputs hold parameters, artifacts, and results from a step

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    artifactsArtifactsArtifacts
    exitCodestringstringExitCode holds the exit code of a script template
    parameters[]Parameter[]*ParameterParameters holds the list of output parameters produced by a step
    +patchStrategy=merge
    +patchMergeKey=name
    resultstringstringResult holds the result (stdout) of a script template
    -

    OwnerReference

    -
    -

    OwnerReference contains enough information to let you identify an owning -object. An owning object must be in the same namespace as the dependent, or -be cluster-scoped, so there is no namespace field. -+structType=atomic

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    apiVersionstringstringAPI version of the referent.
    blockOwnerDeletionbooleanboolIf true, AND if the owner has the "foregroundDeletion" finalizer, then
    the owner cannot be deleted from the key-value store until this
    reference is removed.
    See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion
    for how the garbage collector interacts with this field and enforces the foreground deletion.
    Defaults to false.
    To set this field, a user needs "delete" permission of the owner,
    otherwise 422 (Unprocessable Entity) will be returned.
    +optional
    controllerbooleanboolIf true, this reference points to the managing controller.
    +optional
    kindstringstringKind of the referent.
    More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
    namestringstringName of the referent.
    More info: http://kubernetes.io/docs/user-guide/identifiers#names
    uidUIDUID
    -

    ParallelSteps

    -
    -

    +kubebuilder:validation:Type=array

    -
    -

    interface{}

    -

    Parameter

    -
    -

    Parameter indicate a passed string parameter to a service template with an optional default value

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    defaultAnyStringAnyString
    descriptionAnyStringAnyString
    enum[]AnyString[]AnyStringEnum holds a list of string values to choose from, for the actual value of the parameter
    globalNamestringstringGlobalName exports an output parameter to the global scope, making it available as
    '{{workflow.outputs.parameters.XXXX}} and in workflow.status.outputs.parameters
    namestringstringName is the parameter name
    valueAnyStringAnyString
    valueFromValueFromValueFrom
    -

    PersistentVolumeAccessMode

    -
    -

    +enum

    -
    - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    PersistentVolumeAccessModestringstring+enum
    -

    PersistentVolumeClaimSpec

    -
    -

    PersistentVolumeClaimSpec describes the common attributes of storage devices -and allows a Source for provider-specific attributes

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    accessModes[]PersistentVolumeAccessMode[]PersistentVolumeAccessModeaccessModes contains the desired access modes the volume should have.
    More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
    +optional
    dataSourceTypedLocalObjectReferenceTypedLocalObjectReference
    dataSourceRefTypedLocalObjectReferenceTypedLocalObjectReference
    resourcesResourceRequirementsResourceRequirements
    selectorLabelSelectorLabelSelector
    storageClassNamestringstringstorageClassName is the name of the StorageClass required by the claim.
    More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
    +optional
    volumeModePersistentVolumeModePersistentVolumeMode
    volumeNamestringstringvolumeName is the binding reference to the PersistentVolume backing this claim.
    +optional
    -

    PersistentVolumeClaimTemplate

    -
    -

    PersistentVolumeClaimTemplate is used to produce -PersistentVolumeClaim objects as part of an EphemeralVolumeSource.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    annotationsmap of stringmap[string]stringAnnotations is an unstructured key value map stored with a resource that may be
    set by external tools to store and retrieve arbitrary metadata. They are not
    queryable and should be preserved when modifying objects.
    More info: http://kubernetes.io/docs/user-guide/annotations
    +optional
    clusterNamestringstringDeprecated: ClusterName is a legacy field that was always cleared by
    the system and never used; it will be removed completely in 1.25.
    -

    The name in the go struct is changed to help clients detect -accidental use.

    -

    +optional | | -| creationTimestamp | Time| Time | | | | | -| deletionGracePeriodSeconds | int64 (formatted integer)| int64 | | | Number of seconds allowed for this object to gracefully terminate before -it will be removed from the system. Only set when deletionTimestamp is also set. -May only be shortened. -Read-only. -+optional | | -| deletionTimestamp | Time| Time | | | | | -| finalizers | []string| []string | | | Must be empty before the object is deleted from the registry. Each entry -is an identifier for the responsible component that will remove the entry -from the list. If the deletionTimestamp of the object is non-nil, entries -in this list can only be removed. -Finalizers may be processed and removed in any order. Order is NOT enforced -because it introduces significant risk of stuck finalizers. -finalizers is a shared field, any actor with permission can reorder it. -If the finalizer list is processed in order, then this can lead to a situation -in which the component responsible for the first finalizer in the list is -waiting for a signal (field value, external system, or other) produced by a -component responsible for a finalizer later in the list, resulting in a deadlock. -Without enforced ordering finalizers are free to order amongst themselves and -are not vulnerable to ordering changes in the list. -+optional -+patchStrategy=merge | | -| generateName | string| string | | | GenerateName is an optional prefix, used by the server, to generate a unique -name ONLY IF the Name field has not been provided. -If this field is used, the name returned to the client will be different -than the name passed. This value will also be combined with a unique suffix. -The provided value has the same validation rules as the Name field, -and may be truncated by the length of the suffix required to make the value -unique on the server.

    -

    If this field is specified and the generated name exists, the server will return a 409.

    -

    Applied only if Name is not specified. -More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency -+optional | | -| generation | int64 (formatted integer)| int64 | | | A sequence number representing a specific generation of the desired state. -Populated by the system. Read-only. -+optional | | -| labels | map of string| map[string]string | | | Map of string keys and values that can be used to organize and categorize -(scope and select) objects. May match selectors of replication controllers -and services. -More info: http://kubernetes.io/docs/user-guide/labels -+optional | | -| managedFields | []ManagedFieldsEntry| []*ManagedFieldsEntry | | | ManagedFields maps workflow-id and version to the set of fields -that are managed by that workflow. This is mostly for internal -housekeeping, and users typically shouldn't need to set or -understand this field. A workflow can be the user's name, a -controller's name, or the name of a specific apply path like -"ci-cd". The set of fields is always in the version that the -workflow used when modifying the object.

    -

    +optional | | -| name | string| string | | | Name must be unique within a namespace. Is required when creating resources, although -some resources may allow a client to request the generation of an appropriate name -automatically. Name is primarily intended for creation idempotence and configuration -definition. -Cannot be updated. -More info: http://kubernetes.io/docs/user-guide/identifiers#names -+optional | | -| namespace | string| string | | | Namespace defines the space within which each name must be unique. An empty namespace is -equivalent to the "default" namespace, but "default" is the canonical representation. -Not all objects are required to be scoped to a namespace - the value of this field for -those objects will be empty.

    -

    Must be a DNS_LABEL. -Cannot be updated. -More info: http://kubernetes.io/docs/user-guide/namespaces -+optional | | -| ownerReferences | []OwnerReference| []*OwnerReference | | | List of objects depended by this object. If ALL objects in the list have -been deleted, this object will be garbage collected. If this object is managed by a controller, -then an entry in this list will point to this controller, with the controller field set to true. -There cannot be more than one managing controller. -+optional -+patchMergeKey=uid -+patchStrategy=merge | | -| resourceVersion | string| string | | | An opaque value that represents the internal version of this object that can -be used by clients to determine when objects have changed. May be used for optimistic -concurrency, change detection, and the watch operation on a resource or set of resources. -Clients must treat these values as opaque and passed unmodified back to the server. -They may only be valid for a particular resource or set of resources.

    -

    Populated by the system. -Read-only. -Value must be treated as opaque by clients and . -More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency -+optional | | -| selfLink | string| string | | | Deprecated: selfLink is a legacy read-only field that is no longer populated by the system. -+optional | | -| spec | PersistentVolumeClaimSpec| PersistentVolumeClaimSpec | | | | | -| uid | UID| UID | | | | |

    -

    PersistentVolumeClaimVolumeSource

    -
    -

    This volume finds the bound PV and mounts that volume for the pod. A -PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another -type of volume that is owned by someone else (the system).

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    claimNamestringstringclaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
    More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
    readOnlybooleanboolreadOnly Will force the ReadOnly setting in VolumeMounts.
    Default false.
    +optional
    -

    PersistentVolumeMode

    -
    -

    +enum

    -
    - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    PersistentVolumeModestringstring+enum
    -

    PhotonPersistentDiskVolumeSource

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    fsTypestringstringfsType is the filesystem type to mount.
    Must be a filesystem type supported by the host operating system.
    Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
    pdIDstringstringpdID is the ID that identifies Photon Controller persistent disk
    -

    Plugin

    -
    -

    Plugin is an Object with exactly one key

    -
    -

    interface{}

    -

    PodAffinity

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    preferredDuringSchedulingIgnoredDuringExecution[]WeightedPodAffinityTerm[]*WeightedPodAffinityTermThe scheduler will prefer to schedule pods to nodes that satisfy
    the affinity expressions specified by this field, but it may choose
    a node that violates one or more of the expressions. The node that is
    most preferred is the one with the greatest sum of weights, i.e.
    for each node that meets all of the scheduling requirements (resource
    request, requiredDuringScheduling affinity expressions, etc.),
    compute a sum by iterating through the elements of this field and adding
    "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
    node(s) with the highest sum are the most preferred.
    +optional
    requiredDuringSchedulingIgnoredDuringExecution[]PodAffinityTerm[]*PodAffinityTermIf the affinity requirements specified by this field are not met at
    scheduling time, the pod will not be scheduled onto the node.
    If the affinity requirements specified by this field cease to be met
    at some point during pod execution (e.g. due to a pod label update), the
    system may or may not try to eventually evict the pod from its node.
    When there are multiple elements, the lists of nodes corresponding to each
    podAffinityTerm are intersected, i.e. all terms must be satisfied.
    +optional
    -

    PodAffinityTerm

    -
    -

    Defines a set of pods (namely those matching the labelSelector -relative to the given namespace(s)) that this pod should be -co-located (affinity) or not co-located (anti-affinity) with, -where co-located is defined as running on a node whose value of -the label with key matches that of any node on which -a pod of the set of pods is running

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    labelSelectorLabelSelectorLabelSelector
    namespaceSelectorLabelSelectorLabelSelector
    namespaces[]string[]stringnamespaces specifies a static list of namespace names that the term applies to.
    The term is applied to the union of the namespaces listed in this field
    and the ones selected by namespaceSelector.
    null or empty namespaces list and null namespaceSelector means "this pod's namespace".
    +optional
    topologyKeystringstringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
    the labelSelector in the specified namespaces, where co-located is defined as running on a node
    whose value of the label with key topologyKey matches that of any node on which any of the
    selected pods is running.
    Empty topologyKey is not allowed.
    -

    PodAntiAffinity

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    preferredDuringSchedulingIgnoredDuringExecution[]WeightedPodAffinityTerm[]*WeightedPodAffinityTermThe scheduler will prefer to schedule pods to nodes that satisfy
    the anti-affinity expressions specified by this field, but it may choose
    a node that violates one or more of the expressions. The node that is
    most preferred is the one with the greatest sum of weights, i.e.
    for each node that meets all of the scheduling requirements (resource
    request, requiredDuringScheduling anti-affinity expressions, etc.),
    compute a sum by iterating through the elements of this field and adding
    "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
    node(s) with the highest sum are the most preferred.
    +optional
    requiredDuringSchedulingIgnoredDuringExecution[]PodAffinityTerm[]*PodAffinityTermIf the anti-affinity requirements specified by this field are not met at
    scheduling time, the pod will not be scheduled onto the node.
    If the anti-affinity requirements specified by this field cease to be met
    at some point during pod execution (e.g. due to a pod label update), the
    system may or may not try to eventually evict the pod from its node.
    When there are multiple elements, the lists of nodes corresponding to each
    podAffinityTerm are intersected, i.e. all terms must be satisfied.
    +optional
    -

    PodFSGroupChangePolicy

    -
    -

    PodFSGroupChangePolicy holds policies that will be used for applying fsGroup to a volume -when volume is mounted. -+enum

    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    PodFSGroupChangePolicystringstringPodFSGroupChangePolicy holds policies that will be used for applying fsGroup to a volume
    when volume is mounted.
    +enum
    -

    PodSecurityContext

    -
    -

    Some fields are also present in container.securityContext. Field values of -container.securityContext take precedence over field values of PodSecurityContext.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    fsGroupint64 (formatted integer)int64A special supplemental group that applies to all containers in a pod.
    Some volume types allow the Kubelet to change the ownership of that volume
    to be owned by the pod:
    -
      -
    1. The owning GID will be the FSGroup
    2. -
    3. The setgid bit is set (new files created in the volume will be owned by FSGroup)
    4. -
    5. The permission bits are OR'd with rw-rw----
    6. -
    -

    If unset, the Kubelet will not modify the ownership and permissions of any volume. -Note that this field cannot be set when spec.os.name is windows. -+optional | | -| fsGroupChangePolicy | PodFSGroupChangePolicy| PodFSGroupChangePolicy | | | | | -| runAsGroup | int64 (formatted integer)| int64 | | | The GID to run the entrypoint of the container process. -Uses runtime default if unset. -May also be set in SecurityContext. If set in both SecurityContext and -PodSecurityContext, the value specified in SecurityContext takes precedence -for that container. -Note that this field cannot be set when spec.os.name is windows. -+optional | | -| runAsNonRoot | boolean| bool | | | Indicates that the container must run as a non-root user. -If true, the Kubelet will validate the image at runtime to ensure that it -does not run as UID 0 (root) and fail to start the container if it does. -If unset or false, no such validation will be performed. -May also be set in SecurityContext. If set in both SecurityContext and -PodSecurityContext, the value specified in SecurityContext takes precedence. -+optional | | -| runAsUser | int64 (formatted integer)| int64 | | | The UID to run the entrypoint of the container process. -Defaults to user specified in image metadata if unspecified. -May also be set in SecurityContext. If set in both SecurityContext and -PodSecurityContext, the value specified in SecurityContext takes precedence -for that container. -Note that this field cannot be set when spec.os.name is windows. -+optional | | -| seLinuxOptions | SELinuxOptions| SELinuxOptions | | | | | -| seccompProfile | SeccompProfile| SeccompProfile | | | | | -| supplementalGroups | []int64 (formatted integer)| []int64 | | | A list of groups applied to the first process run in each container, in addition -to the container's primary GID. If unspecified, no groups will be added to -any container. -Note that this field cannot be set when spec.os.name is windows. 
-+optional | | -| sysctls | []Sysctl| []*Sysctl | | | Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported -sysctls (by the container runtime) might fail to launch. -Note that this field cannot be set when spec.os.name is windows. -+optional | | -| windowsOptions | WindowsSecurityContextOptions| WindowsSecurityContextOptions | | | | |

    -

    PortworxVolumeSource

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    fsTypestringstringfSType represents the filesystem type to mount
    Must be a filesystem type supported by the host operating system.
    Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
    readOnlybooleanboolreadOnly defaults to false (read/write). ReadOnly here will force
    the ReadOnly setting in VolumeMounts.
    +optional
    volumeIDstringstringvolumeID uniquely identifies a Portworx volume
    -

    PreferredSchedulingTerm

    -
    -

    An empty preferred scheduling term matches all objects with implicit weight 0 -(i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    preferenceNodeSelectorTermNodeSelectorTerm
    weightint32 (formatted integer)int32Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
    -

    Probe

    -
    -

    Probe describes a health check to be performed against a container to determine whether it is -alive or ready to receive traffic.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    execExecActionExecAction
    failureThresholdint32 (formatted integer)int32Minimum consecutive failures for the probe to be considered failed after having succeeded.
    Defaults to 3. Minimum value is 1.
    +optional
    grpcGRPCActionGRPCAction
    httpGetHTTPGetActionHTTPGetAction
    initialDelaySecondsint32 (formatted integer)int32Number of seconds after the container has started before liveness probes are initiated.
    More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
    +optional
    periodSecondsint32 (formatted integer)int32How often (in seconds) to perform the probe.
    Default to 10 seconds. Minimum value is 1.
    +optional
    successThresholdint32 (formatted integer)int32Minimum consecutive successes for the probe to be considered successful after having failed.
    Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
    +optional
    tcpSocketTCPSocketActionTCPSocketAction
    terminationGracePeriodSecondsint64 (formatted integer)int64Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
    The grace period is the duration in seconds after the processes running in the pod are sent
    a termination signal and the time when the processes are forcibly halted with a kill signal.
    Set this value longer than the expected cleanup time for your process.
    If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
    value overrides the value provided by the pod spec.
    Value must be non-negative integer. The value zero indicates stop immediately via
    the kill signal (no opportunity to shut down).
    This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
    Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
    +optional
    timeoutSecondsint32 (formatted integer)int32Number of seconds after which the probe times out.
    Defaults to 1 second. Minimum value is 1.
    More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
    +optional
    -

    ProcMountType

    -
    -

    +enum

    -
    - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    ProcMountTypestringstring+enum
    -

    Progress

    - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    Progressstringstring
    -

    ProjectedVolumeSource

    -
    -

    Represents a projected volume source

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    defaultModeint32 (formatted integer)int32defaultMode are the mode bits used to set permissions on created files by default.
    Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
    YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
    Directories within the path are not affected by this setting.
    This might be in conflict with other options that affect the file
    mode, like fsGroup, and the result can be other mode bits set.
    +optional
    sources[]VolumeProjection[]*VolumeProjectionsources is the list of volume projections
    +optional
    -

    Prometheus

    -
    -

    Prometheus is a prometheus metric to be emitted

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    counterCounterCounter
    gaugeGaugeGauge
    helpstringstringHelp is a string that describes the metric
    histogramHistogramHistogram
    labels[]MetricLabel[]*MetricLabelLabels is a list of metric labels
    namestringstringName is the name of the metric
    whenstringstringWhen is a conditional statement that decides when to emit the metric
    -

    Protocol

    -
    -

    +enum

    -
    - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    Protocolstringstring+enum
    -

    PullPolicy

    -
    -

    PullPolicy describes a policy for if/when to pull a container image -+enum

    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    PullPolicystringstringPullPolicy describes a policy for if/when to pull a container image
    +enum
    -

    Quantity

    -
    -

    The serialization format is:

    -
    -

    ::= -(Note that may be empty, from the "" case in .) - ::= 0 | 1 | ... | 9 - ::= | - ::= | . | . | . - ::= "+" | "-" - ::= | - ::= | | - ::= Ki | Mi | Gi | Ti | Pi | Ei -(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) - ::= m | "" | k | M | G | T | P | E -(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) - ::= "e" | "E"

    -

    No matter which of the three exponent forms is used, no quantity may represent -a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal -places. Numbers larger or more precise will be capped or rounded up. -(E.g.: 0.1m will rounded up to 1m.) -This may be extended in the future if we require larger or smaller quantities.

    -

    When a Quantity is parsed from a string, it will remember the type of suffix -it had, and will use the same type again when it is serialized.

    -

    Before serializing, Quantity will be put in "canonical form". -This means that Exponent/suffix will be adjusted up or down (with a -corresponding increase or decrease in Mantissa) such that: -a. No precision is lost -b. No fractional digits will be emitted -c. The exponent (or suffix) is as large as possible. -The sign will be omitted unless the number is negative.

    -

    Examples: -1.5 will be serialized as "1500m" -1.5Gi will be serialized as "1536Mi"

    -

    Note that the quantity will NEVER be internally represented by a -floating point number. That is the whole point of this exercise.

    -

    Non-canonical values will still parse as long as they are well formed, -but will be re-emitted in their canonical form. (So always use canonical -form, or don't diff.)

    -

    This format is intended to make it difficult to use these numbers without -writing some sort of special handling code in the hopes that that will -cause implementors to also use a fixed point implementation.

    -

    +protobuf=true -+protobuf.embed=string -+protobuf.options.marshal=false -+protobuf.options.(gogoproto.goproto_stringer)=false -+k8s:deepcopy-gen=true -+k8s:openapi-gen=true

    -

    interface{}

    -

    QuobyteVolumeSource

    -
    -

    Quobyte volumes do not support ownership management or SELinux relabeling.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    groupstringstringgroup to map volume access to
    Default is no group
    +optional
    readOnlybooleanboolreadOnly here will force the Quobyte volume to be mounted with read-only permissions.
    Defaults to false.
    +optional
    registrystringstringregistry represents a single or multiple Quobyte Registry services
    specified as a string as host:port pair (multiple entries are separated with commas)
    which acts as the central registry for volumes
    tenantstringstringtenant owning the given Quobyte volume in the Backend
    Used with dynamically provisioned Quobyte volumes, value is set by the plugin
    +optional
    userstringstringuser to map volume access to
    Defaults to serviceaccount user
    +optional
    volumestringstringvolume is a string that references an already created Quobyte volume by name.
    -

    RBDVolumeSource

    -
    -

    RBD volumes support ownership management and SELinux relabeling.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    fsTypestringstringfsType is the filesystem type of the volume that you want to mount.
    Tip: Ensure that the filesystem type is supported by the host operating system.
    Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
    More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
    TODO: how do we prevent errors in the filesystem from compromising the machine
    +optional
    imagestringstringimage is the rados image name.
    More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
    keyringstringstringkeyring is the path to key ring for RBDUser.
    Default is /etc/ceph/keyring.
    More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
    +optional
    monitors[]string[]stringmonitors is a collection of Ceph monitors.
    More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
    poolstringstringpool is the rados pool name.
    Default is rbd.
    More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
    +optional
    readOnlybooleanboolreadOnly here will force the ReadOnly setting in VolumeMounts.
    Defaults to false.
    More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
    +optional
    secretRefLocalObjectReferenceLocalObjectReference
    userstringstringuser is the rados user name.
    Default is admin.
    More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
    +optional
    -

    RawArtifact

    -
    -

    RawArtifact allows raw string content to be placed as an artifact in a container

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    datastringstringData is the string contents of the artifact
    -

    ResourceFieldSelector

    -
    -

    ResourceFieldSelector represents container resources (cpu, memory) and their output format -+structType=atomic

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    containerNamestringstringContainer name: required for volumes, optional for env vars
    +optional
    divisorQuantityQuantity
    resourcestringstringRequired: resource to select
    -

    ResourceList

    -

    ResourceList

    -

    ResourceRequirements

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    limitsResourceListResourceList
    requestsResourceListResourceList
    -

    ResourceTemplate

    -
    -

    ResourceTemplate is a template subtype to manipulate kubernetes resources

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    actionstringstringAction is the action to perform to the resource.
    Must be one of: get, create, apply, delete, replace, patch
    failureConditionstringstringFailureCondition is a label selector expression which describes the conditions
    of the k8s resource in which the step was considered failed
    flags[]string[]stringFlags is a set of additional options passed to kubectl before submitting a resource
    I.e. to disable resource validation:
    flags: [
    "--validate=false" # disable resource validation
    ]
    manifeststringstringManifest contains the kubernetes manifest
    manifestFromManifestFromManifestFrom
    mergeStrategystringstringMergeStrategy is the strategy used to merge a patch. It defaults to "strategic"
    Must be one of: strategic, merge, json
    setOwnerReferencebooleanboolSetOwnerReference sets the reference to the workflow on the OwnerReference of generated resource.
    successConditionstringstringSuccessCondition is a label selector expression which describes the conditions
    of the k8s resource in which it is acceptable to proceed to the following step
    -

    RetryAffinity

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    nodeAntiAffinityRetryNodeAntiAffinityRetryNodeAntiAffinity
    -

    RetryNodeAntiAffinity

    -
    -

    In order to prevent running steps on the same host, it uses "kubernetes.io/hostname".

    -
    -

    interface{}

    -

    RetryPolicy

    - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    RetryPolicystringstring
    -

    RetryStrategy

    -
    -

    RetryStrategy provides controls on how to retry a workflow step

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    affinityRetryAffinityRetryAffinity
    backoffBackoffBackoff
    expressionstringstringExpression is a condition expression for when a node will be retried. If it evaluates to false, the node will not
    be retried and the retry strategy will be ignored
    limitIntOrStringIntOrString
    retryPolicyRetryPolicyRetryPolicy
    -

    S3Artifact

    -
    -

    S3Artifact is the location of an S3 artifact

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    accessKeySecretSecretKeySelectorSecretKeySelector
    bucketstringstringBucket is the name of the bucket
    caSecretSecretKeySelectorSecretKeySelector
    createBucketIfNotPresentCreateS3BucketOptionsCreateS3BucketOptions
    encryptionOptionsS3EncryptionOptionsS3EncryptionOptions
    endpointstringstringEndpoint is the hostname of the bucket endpoint
    insecurebooleanboolInsecure will connect to the service with TLS
    keystringstringKey is the key in the bucket where the artifact resides
    regionstringstringRegion contains the optional bucket region
    roleARNstringstringRoleARN is the Amazon Resource Name (ARN) of the role to assume.
    secretKeySecretSecretKeySelectorSecretKeySelector
    useSDKCredsbooleanboolUseSDKCreds tells the driver to figure out credentials based on sdk defaults.
    -

    S3EncryptionOptions

    -
    -

    S3EncryptionOptions used to determine encryption options during s3 operations

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    enableEncryptionbooleanboolEnableEncryption tells the driver to encrypt objects if set to true. If kmsKeyId and serverSideCustomerKeySecret are not set, SSE-S3 will be used
    kmsEncryptionContextstringstringKmsEncryptionContext is a json blob that contains an encryption context. See https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context for more information
    kmsKeyIdstringstringKMSKeyId tells the driver to encrypt the object using the specified KMS Key.
    serverSideCustomerKeySecretSecretKeySelectorSecretKeySelector
    -

    SELinuxOptions

    -
    -

    SELinuxOptions are the labels to be applied to the container

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    levelstringstringLevel is SELinux level label that applies to the container.
    +optional
    rolestringstringRole is a SELinux role label that applies to the container.
    +optional
    typestringstringType is a SELinux type label that applies to the container.
    +optional
    userstringstringUser is a SELinux user label that applies to the container.
    +optional
    -

    ScaleIOVolumeSource

    -
    -

    ScaleIOVolumeSource represents a persistent ScaleIO volume

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    fsTypestringstringfsType is the filesystem type to mount.
    Must be a filesystem type supported by the host operating system.
    Ex. "ext4", "xfs", "ntfs".
    Default is "xfs".
    +optional
    gatewaystringstringgateway is the host address of the ScaleIO API Gateway.
    protectionDomainstringstringprotectionDomain is the name of the ScaleIO Protection Domain for the configured storage.
    +optional
    readOnlybooleanboolreadOnly Defaults to false (read/write). ReadOnly here will force
    the ReadOnly setting in VolumeMounts.
    +optional
    secretRefLocalObjectReferenceLocalObjectReference
    sslEnabledbooleanboolsslEnabled Flag enable/disable SSL communication with Gateway, default false
    +optional
    storageModestringstringstorageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
    Default is ThinProvisioned.
    +optional
    storagePoolstringstringstoragePool is the ScaleIO Storage Pool associated with the protection domain.
    +optional
    systemstringstringsystem is the name of the storage system as configured in ScaleIO.
    volumeNamestringstringvolumeName is the name of a volume already created in the ScaleIO system
    that is associated with this volume source.
    -

    ScriptTemplate

    -
    -

    ScriptTemplate is a template subtype to enable scripting through code steps

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    args[]string[]stringArguments to the entrypoint.
    The container image's CMD is used if this is not provided.
    Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
    cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
    to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
    produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
    of whether the variable exists or not. Cannot be updated.
    More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
    +optional
    command[]string[]stringEntrypoint array. Not executed within a shell.
    The container image's ENTRYPOINT is used if this is not provided.
    Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
    cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
    to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
    produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
    of whether the variable exists or not. Cannot be updated.
    More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
    +optional
    env[]EnvVar[]*EnvVarList of environment variables to set in the container.
    Cannot be updated.
    +optional
    +patchMergeKey=name
    +patchStrategy=merge
    envFrom[]EnvFromSource[]*EnvFromSourceList of sources to populate environment variables in the container.
    The keys defined within a source must be a C_IDENTIFIER. All invalid keys
    will be reported as an event when the container is starting. When a key exists in multiple
    sources, the value associated with the last source will take precedence.
    Values defined by an Env with a duplicate key will take precedence.
    Cannot be updated.
    +optional
    imagestringstringContainer image name.
    More info: https://kubernetes.io/docs/concepts/containers/images
    This field is optional to allow higher level config management to default or override
    container images in workload controllers like Deployments and StatefulSets.
    +optional
    imagePullPolicyPullPolicyPullPolicy
    lifecycleLifecycleLifecycle
    livenessProbeProbeProbe
    namestringstringName of the container specified as a DNS_LABEL.
    Each container in a pod must have a unique name (DNS_LABEL).
    Cannot be updated.
    ports[]ContainerPort[]*ContainerPortList of ports to expose from the container. Exposing a port here gives
    the system additional information about the network connections a
    container uses, but is primarily informational. Not specifying a port here
    DOES NOT prevent that port from being exposed. Any port which is
    listening on the default "0.0.0.0" address inside a container will be
    accessible from the network.
    Cannot be updated.
    +optional
    +patchMergeKey=containerPort
    +patchStrategy=merge
    +listType=map
    +listMapKey=containerPort
    +listMapKey=protocol
    readinessProbeProbeProbe
    resourcesResourceRequirementsResourceRequirements
    securityContextSecurityContextSecurityContext
    sourcestringstringSource contains the source code of the script to execute
    startupProbeProbeProbe
    stdinbooleanboolWhether this container should allocate a buffer for stdin in the container runtime. If this
    is not set, reads from stdin in the container will always result in EOF.
    Default is false.
    +optional
    stdinOncebooleanboolWhether the container runtime should close the stdin channel after it has been opened by
    a single attach. When stdin is true the stdin stream will remain open across multiple attach
    sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
    first client attaches to stdin, and then remains open and accepts data until the client disconnects,
    at which time stdin is closed and remains closed until the container is restarted. If this
    flag is false, a container process that reads from stdin will never receive an EOF.
    Default is false
    +optional
    terminationMessagePathstringstringOptional: Path at which the file to which the container's termination message
    will be written is mounted into the container's filesystem.
    Message written is intended to be brief final status, such as an assertion failure message.
    Will be truncated by the node if greater than 4096 bytes. The total message length across
    all containers will be limited to 12kb.
    Defaults to /dev/termination-log.
    Cannot be updated.
    +optional
    terminationMessagePolicyTerminationMessagePolicyTerminationMessagePolicy
    ttybooleanboolWhether this container should allocate a TTY for itself, also requires 'stdin' to be true.
    Default is false.
    +optional
    volumeDevices[]VolumeDevice[]*VolumeDevicevolumeDevices is the list of block devices to be used by the container.
    +patchMergeKey=devicePath
    +patchStrategy=merge
    +optional
    volumeMounts[]VolumeMount[]*VolumeMountPod volumes to mount into the container's filesystem.
    Cannot be updated.
    +optional
    +patchMergeKey=mountPath
    +patchStrategy=merge
    workingDirstringstringContainer's working directory.
    If not specified, the container runtime's default will be used, which
    might be configured in the container image.
    Cannot be updated.
    +optional
    -

    SeccompProfile

    -
    -

    Only one profile source may be set. -+union

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    localhostProfilestringstringlocalhostProfile indicates a profile defined in a file on the node should be used.
    The profile must be preconfigured on the node to work.
    Must be a descending path, relative to the kubelet's configured seccomp profile location.
    Must only be set if type is "Localhost".
    +optional
    typeSeccompProfileTypeSeccompProfileType
    -

    SeccompProfileType

    -
    -

    +enum

    -
    - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    SeccompProfileTypestringstring+enum
    -

    SecretEnvSource

    -
    -

    The contents of the target Secret's Data field will represent the -key-value pairs as environment variables.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    namestringstringName of the referent.
    More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
    TODO: Add other useful fields. apiVersion, kind, uid?
    +optional
    optionalbooleanboolSpecify whether the Secret must be defined
    +optional
    -

    SecretKeySelector

    -
    -

    +structType=atomic

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    keystringstringThe key of the secret to select from. Must be a valid secret key.
    namestringstringName of the referent.
    More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
    TODO: Add other useful fields. apiVersion, kind, uid?
    +optional
    optionalbooleanboolSpecify whether the Secret or its key must be defined
    +optional
    -

    SecretProjection

    -
    -

    The contents of the target Secret's Data field will be presented in a -projected volume as files using the keys in the Data field as the file names. -Note that this is identical to a secret volume source without the default -mode.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    items[]KeyToPath[]*KeyToPathitems if unspecified, each key-value pair in the Data field of the referenced
    Secret will be projected into the volume as a file whose name is the
    key and content is the value. If specified, the listed keys will be
    projected into the specified paths, and unlisted keys will not be
    present. If a key is specified which is not present in the Secret,
    the volume setup will error unless it is marked optional. Paths must be
    relative and may not contain the '..' path or start with '..'.
    +optional
    namestringstringName of the referent.
    More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
    TODO: Add other useful fields. apiVersion, kind, uid?
    +optional
    optionalbooleanbooloptional field specify whether the Secret or its key must be defined
    +optional
    -

    SecretVolumeSource

    -
    -

    The contents of the target Secret's Data field will be presented in a volume -as files using the keys in the Data field as the file names. -Secret volumes support ownership management and SELinux relabeling.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    defaultModeint32 (formatted integer)int32defaultMode is Optional: mode bits used to set permissions on created files by default.
    Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
    YAML accepts both octal and decimal values, JSON requires decimal values
    for mode bits. Defaults to 0644.
    Directories within the path are not affected by this setting.
    This might be in conflict with other options that affect the file
    mode, like fsGroup, and the result can be other mode bits set.
    +optional
    items[]KeyToPath[]*KeyToPathitems If unspecified, each key-value pair in the Data field of the referenced
    Secret will be projected into the volume as a file whose name is the
    key and content is the value. If specified, the listed keys will be
    projected into the specified paths, and unlisted keys will not be
    present. If a key is specified which is not present in the Secret,
    the volume setup will error unless it is marked optional. Paths must be
    relative and may not contain the '..' path or start with '..'.
    +optional
    optionalbooleanbooloptional field specify whether the Secret or its keys must be defined
    +optional
    secretNamestringstringsecretName is the name of the secret in the pod's namespace to use.
    More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
    +optional
    -

    SecurityContext

    -
    -

    Some fields are present in both SecurityContext and PodSecurityContext. When both -are set, the values in SecurityContext take precedence.

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    allowPrivilegeEscalationbooleanboolAllowPrivilegeEscalation controls whether a process can gain more
    privileges than its parent process. This bool directly controls if
    the no_new_privs flag will be set on the container process.
    AllowPrivilegeEscalation is true always when the container is:
    1) run as Privileged
    2) has CAP_SYS_ADMIN
    Note that this field cannot be set when spec.os.name is windows.
    +optional
    capabilitiesCapabilitiesCapabilities
    privilegedbooleanboolRun container in privileged mode.
    Processes in privileged containers are essentially equivalent to root on the host.
    Defaults to false.
    Note that this field cannot be set when spec.os.name is windows.
    +optional
    procMountProcMountTypeProcMountType
    readOnlyRootFilesystembooleanboolWhether this container has a read-only root filesystem.
    Default is false.
    Note that this field cannot be set when spec.os.name is windows.
    +optional
    runAsGroupint64 (formatted integer)int64The GID to run the entrypoint of the container process.
    Uses runtime default if unset.
    May also be set in PodSecurityContext. If set in both SecurityContext and
    PodSecurityContext, the value specified in SecurityContext takes precedence.
    Note that this field cannot be set when spec.os.name is windows.
    +optional
    runAsNonRootbooleanboolIndicates that the container must run as a non-root user.
    If true, the Kubelet will validate the image at runtime to ensure that it
    does not run as UID 0 (root) and fail to start the container if it does.
    If unset or false, no such validation will be performed.
    May also be set in PodSecurityContext. If set in both SecurityContext and
    PodSecurityContext, the value specified in SecurityContext takes precedence.
    +optional
    runAsUserint64 (formatted integer)int64The UID to run the entrypoint of the container process.
    Defaults to user specified in image metadata if unspecified.
    May also be set in PodSecurityContext. If set in both SecurityContext and
    PodSecurityContext, the value specified in SecurityContext takes precedence.
    Note that this field cannot be set when spec.os.name is windows.
    +optional
    seLinuxOptionsSELinuxOptionsSELinuxOptions
    seccompProfileSeccompProfileSeccompProfile
    windowsOptionsWindowsSecurityContextOptionsWindowsSecurityContextOptions
    -

    SemaphoreRef

    -
    -

    SemaphoreRef is a reference of Semaphore

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    configMapKeyRefConfigMapKeySelectorConfigMapKeySelector
    namespacestringstring"[namespace of workflow]"
    -

    Sequence

    -
    -

    Sequence expands a workflow step into numeric range

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    countIntOrStringIntOrString
    endIntOrStringIntOrString
    formatstringstringFormat is a printf format string to format the value in the sequence
    startIntOrStringIntOrString
    -

    ServiceAccountTokenProjection

    -
    -

    ServiceAccountTokenProjection represents a projected service account token -volume. This projection can be used to insert a service account token into -the pods runtime filesystem for use against APIs (Kubernetes API Server or -otherwise).

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    audiencestringstringaudience is the intended audience of the token. A recipient of a token
    must identify itself with an identifier specified in the audience of the
    token, and otherwise should reject the token. The audience defaults to the
    identifier of the apiserver.
    +optional
    expirationSecondsint64 (formatted integer)int64expirationSeconds is the requested duration of validity of the service
    account token. As the token approaches expiration, the kubelet volume
    plugin will proactively rotate the service account token. The kubelet will
    start trying to rotate the token if the token is older than 80 percent of
    its time to live or if the token is older than 24 hours.Defaults to 1 hour
    and must be at least 10 minutes.
    +optional
    pathstringstringpath is the path relative to the mount point of the file to project the
    token into.
    -

    StorageMedium

    - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    StorageMediumstringstring
    -

    StorageOSVolumeSource

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    fsTypestringstringfsType is the filesystem type to mount.
    Must be a filesystem type supported by the host operating system.
    Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
    +optional
    readOnlybooleanboolreadOnly defaults to false (read/write). ReadOnly here will force
    the ReadOnly setting in VolumeMounts.
    +optional
    secretRefLocalObjectReferenceLocalObjectReference
    volumeNamestringstringvolumeName is the human-readable name of the StorageOS volume. Volume
    names are only unique within a namespace.
    volumeNamespacestringstringvolumeNamespace specifies the scope of the volume within StorageOS. If no
    namespace is specified then the Pod's namespace will be used. This allows the
    Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
    Set VolumeName to any name to override the default behaviour.
    Set to "default" if you are not using namespaces within StorageOS.
    Namespaces that do not pre-exist within StorageOS will be created.
    +optional
    -

    SuppliedValueFrom

    -

    interface{}

    -

    SuspendTemplate

    -
    -

    SuspendTemplate is a template subtype to suspend a workflow at a predetermined point in time

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    durationstringstringDuration is the seconds to wait before automatically resuming a template. Must be a string. Default unit is seconds.
    Could also be a Duration, e.g.: "2m", "6h"
    -

    Synchronization

    -
    -

    Synchronization holds synchronization lock configuration

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    mutexMutexMutex
    semaphoreSemaphoreRefSemaphoreRef
    -

    Sysctl

    -
    -

    Sysctl defines a kernel parameter to be set

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    namestringstringName of a property to set
    valuestringstringValue of a property to set
    -

    TCPSocketAction

    -
    -

    TCPSocketAction describes an action based on opening a socket

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    hoststringstringOptional: Host name to connect to, defaults to the pod IP.
    +optional
    portIntOrStringIntOrString
    -

    TaintEffect

    -
    -

    +enum

    -
    - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    TaintEffectstringstring+enum
    -

    TarStrategy

    -
    -

    TarStrategy will tar and gzip the file or directory when saving

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    compressionLevelint32 (formatted integer)int32CompressionLevel specifies the gzip compression level to use for the artifact.
    Defaults to gzip.DefaultCompression.
    -

    Template

    -
    -

    Template is a reusable and composable unit of execution in a workflow

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    activeDeadlineSecondsIntOrStringIntOrString
    affinityAffinityAffinity
    archiveLocationArtifactLocationArtifactLocation
    automountServiceAccountTokenbooleanboolAutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods.
    ServiceAccountName of ExecutorConfig must be specified if this value is false.
    containerContainerContainer
    containerSetContainerSetTemplateContainerSetTemplate
    daemonbooleanboolDaemon will allow a workflow to proceed to the next step so long as the container reaches readiness
    dagDAGTemplateDAGTemplate
    dataDataData
    executorExecutorConfigExecutorConfig
    failFastbooleanboolFailFast, if specified, will fail this template if any of its child pods has failed. This is useful for when this
    template is expanded with withItems, etc.
    hostAliases[]HostAlias[]*HostAliasHostAliases is an optional list of hosts and IPs that will be injected into the pod spec
    +patchStrategy=merge
    +patchMergeKey=ip
    httpHTTPHTTP
    initContainers[]UserContainer[]*UserContainerInitContainers is a list of containers which run before the main container.
    +patchStrategy=merge
    +patchMergeKey=name
    inputsInputsInputs
    memoizeMemoizeMemoize
    metadataMetadataMetadata
    metricsMetricsMetrics
    namestringstringName is the name of the template
    nodeSelectormap of stringmap[string]stringNodeSelector is a selector to schedule this step of the workflow to be
    run on the selected node(s). Overrides the selector set at the workflow level.
    outputsOutputsOutputs
    parallelismint64 (formatted integer)int64Parallelism limits the max total parallel pods that can execute at the same time within the
    boundaries of this template invocation. If additional steps/dag templates are invoked, the
    pods created by those templates will not be counted towards this total.
    pluginPluginPlugin
    podSpecPatchstringstringPodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of
    container fields which are not strings (e.g. resource limits).
    priorityint32 (formatted integer)int32Priority to apply to workflow pods.
    priorityClassNamestringstringPriorityClassName to apply to workflow pods.
    resourceResourceTemplateResourceTemplate
    retryStrategyRetryStrategyRetryStrategy
    schedulerNamestringstringIf specified, the pod will be dispatched by specified scheduler.
    Or it will be dispatched by workflow scope scheduler if specified.
    If neither specified, the pod will be dispatched by default scheduler.
    +optional
    scriptScriptTemplateScriptTemplate
    securityContextPodSecurityContextPodSecurityContext
    serviceAccountNamestringstringServiceAccountName to apply to workflow pods
    sidecars[]UserContainer[]*UserContainerSidecars is a list of containers which run alongside the main container
    Sidecars are automatically killed when the main container completes
    +patchStrategy=merge
    +patchMergeKey=name
    steps[]ParallelSteps[]ParallelStepsSteps define a series of sequential/parallel workflow steps
    suspendSuspendTemplateSuspendTemplate
    synchronizationSynchronizationSynchronization
    timeoutstringstringTimeout allows to set the total node execution timeout duration counting from the node's start time.
    This duration also includes time in which the node spends in Pending state. This duration may not be applied to Step or DAG templates.
    tolerations[]Toleration[]*TolerationTolerations to apply to workflow pods.
    +patchStrategy=merge
    +patchMergeKey=key
    volumes[]Volume[]*VolumeVolumes is a list of volumes that can be mounted by containers in a template.
    +patchStrategy=merge
    +patchMergeKey=name
    -

    TemplateRef

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    clusterScopebooleanboolClusterScope indicates the referred template is cluster scoped (i.e. a ClusterWorkflowTemplate).
    namestringstringName is the resource name of the template.
    templatestringstringTemplate is the name of referred template in the resource.
    -

    TerminationMessagePolicy

    -
    -

    +enum

    -
    - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    TerminationMessagePolicystringstring+enum
    -

    Time

    -
    -

    +protobuf.options.marshal=false -+protobuf.as=Timestamp -+protobuf.options.(gogoproto.goproto_stringer)=false

    -
    -

    interface{}

    -

    Toleration

    -
    -

    The pod this Toleration is attached to tolerates any taint that matches -the triple using the matching operator .

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    effectTaintEffectTaintEffect
    keystringstringKey is the taint key that the toleration applies to. Empty means match all taint keys.
    If the key is empty, operator must be Exists; this combination means to match all values and all keys.
    +optional
    operatorTolerationOperatorTolerationOperator
    tolerationSecondsint64 (formatted integer)int64TolerationSeconds represents the period of time the toleration (which must be
    of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
    it is not set, which means tolerate the taint forever (do not evict). Zero and
    negative values will be treated as 0 (evict immediately) by the system.
    +optional
    valuestringstringValue is the taint value the toleration matches to.
    If the operator is Exists, the value should be empty, otherwise just a regular string.
    +optional
    -

    TolerationOperator

    -
    -

    +enum

    -
    - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    TolerationOperatorstringstring+enum
    -

    Transformation

    -

    []TransformationStep

    -

    TransformationStep

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    expressionstringstringExpression defines an expr expression to apply
    -

    Type

    - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    Typeint64 (formatted integer)int64
    -

    TypedLocalObjectReference

    -
    -

    TypedLocalObjectReference contains enough information to let you locate the -typed referenced object inside the same namespace. -+structType=atomic

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    apiGroupstringstringAPIGroup is the group for the resource being referenced.
    If APIGroup is not specified, the specified Kind must be in the core API group.
    For any other third-party types, APIGroup is required.
    +optional
    kindstringstringKind is the type of resource being referenced
    namestringstringName is the name of resource being referenced
    -

    UID

    -
    -

    UID is a type that holds unique ID values, including UUIDs. Because we -don't ONLY use UUIDs, this is an alias to string. Being a type captures -intent and helps make sure that UIDs and names do not get conflated.

    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    UIDstringstringUID is a type that holds unique ID values, including UUIDs. Because we
    don't ONLY use UUIDs, this is an alias to string. Being a type captures
    intent and helps make sure that UIDs and names do not get conflated.
    -

    URIScheme

    -
    -

    URIScheme identifies the scheme used for connection to a host for Get actions -+enum

    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeDefaultDescriptionExample
    URISchemestringstringURIScheme identifies the scheme used for connection to a host for Get actions
    +enum
    -

    UserContainer

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    args[]string[]stringArguments to the entrypoint.
    The container image's CMD is used if this is not provided.
    Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
    cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
    to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
    produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
    of whether the variable exists or not. Cannot be updated.
    More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
    +optional
    command[]string[]stringEntrypoint array. Not executed within a shell.
    The container image's ENTRYPOINT is used if this is not provided.
    Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
    cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
    to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
    produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
    of whether the variable exists or not. Cannot be updated.
    More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
    +optional
    env[]EnvVar[]*EnvVarList of environment variables to set in the container.
    Cannot be updated.
    +optional
    +patchMergeKey=name
    +patchStrategy=merge
    envFrom[]EnvFromSource[]*EnvFromSourceList of sources to populate environment variables in the container.
    The keys defined within a source must be a C_IDENTIFIER. All invalid keys
    will be reported as an event when the container is starting. When a key exists in multiple
    sources, the value associated with the last source will take precedence.
    Values defined by an Env with a duplicate key will take precedence.
    Cannot be updated.
    +optional
    imagestringstringContainer image name.
    More info: https://kubernetes.io/docs/concepts/containers/images
    This field is optional to allow higher level config management to default or override
    container images in workload controllers like Deployments and StatefulSets.
    +optional
    imagePullPolicyPullPolicyPullPolicy
    lifecycleLifecycleLifecycle
    livenessProbeProbeProbe
    mirrorVolumeMountsbooleanboolMirrorVolumeMounts will mount the same volumes specified in the main container
    to the container (including artifacts), at the same mountPaths. This enables
    dind daemon to partially see the same filesystem as the main container in
    order to use features such as docker volume binding
    namestringstringName of the container specified as a DNS_LABEL.
    Each container in a pod must have a unique name (DNS_LABEL).
    Cannot be updated.
    ports[]ContainerPort[]*ContainerPortList of ports to expose from the container. Exposing a port here gives
    the system additional information about the network connections a
    container uses, but is primarily informational. Not specifying a port here
    DOES NOT prevent that port from being exposed. Any port which is
    listening on the default "0.0.0.0" address inside a container will be
    accessible from the network.
    Cannot be updated.
    +optional
    +patchMergeKey=containerPort
    +patchStrategy=merge
    +listType=map
    +listMapKey=containerPort
    +listMapKey=protocol
    readinessProbeProbeProbe
    resourcesResourceRequirementsResourceRequirements
    securityContextSecurityContextSecurityContext
    startupProbeProbeProbe
    stdinbooleanboolWhether this container should allocate a buffer for stdin in the container runtime. If this
    is not set, reads from stdin in the container will always result in EOF.
    Default is false.
    +optional
    stdinOncebooleanboolWhether the container runtime should close the stdin channel after it has been opened by
    a single attach. When stdin is true the stdin stream will remain open across multiple attach
    sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
    first client attaches to stdin, and then remains open and accepts data until the client disconnects,
    at which time stdin is closed and remains closed until the container is restarted. If this
    flag is false, a container processes that reads from stdin will never receive an EOF.
    Default is false
    +optional
    terminationMessagePathstringstringOptional: Path at which the file to which the container's termination message
    will be written is mounted into the container's filesystem.
    Message written is intended to be brief final status, such as an assertion failure message.
    Will be truncated by the node if greater than 4096 bytes. The total message length across
    all containers will be limited to 12kb.
    Defaults to /dev/termination-log.
    Cannot be updated.
    +optional
    terminationMessagePolicyTerminationMessagePolicyTerminationMessagePolicy
    ttybooleanboolWhether this container should allocate a TTY for itself, also requires 'stdin' to be true.
    Default is false.
    +optional
    volumeDevices[]VolumeDevice[]*VolumeDevicevolumeDevices is the list of block devices to be used by the container.
    +patchMergeKey=devicePath
    +patchStrategy=merge
    +optional
    volumeMounts[]VolumeMount[]*VolumeMountPod volumes to mount into the container's filesystem.
    Cannot be updated.
    +optional
    +patchMergeKey=mountPath
    +patchStrategy=merge
    workingDirstringstringContainer's working directory.
    If not specified, the container runtime's default will be used, which
    might be configured in the container image.
    Cannot be updated.
    +optional
    -

    ValueFrom

    -
    -

    ValueFrom describes a location in which to obtain the value to a parameter

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    configMapKeyRefConfigMapKeySelectorConfigMapKeySelector
    defaultAnyStringAnyString
    eventstringstringSelector (https://github.com/antonmedv/expr) that is evaluated against the event to get the value of the parameter. E.g. payload.message
    expressionstringstringExpression, if defined, is evaluated to specify the value for the parameter
    jqFilterstringstringJQFilter expression against the resource object in resource templates
    jsonPathstringstringJSONPath of a resource to retrieve an output parameter value from in resource templates
    parameterstringstringParameter reference to a step or dag task in which to retrieve an output parameter value from
    (e.g. '{{steps.mystep.outputs.myparam}}')
    pathstringstringPath in the container to retrieve an output parameter value from in container templates
    suppliedSuppliedValueFromSuppliedValueFrom
    -

    Volume

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    awsElasticBlockStoreAWSElasticBlockStoreVolumeSourceAWSElasticBlockStoreVolumeSource
    azureDiskAzureDiskVolumeSourceAzureDiskVolumeSource
    azureFileAzureFileVolumeSourceAzureFileVolumeSource
    cephfsCephFSVolumeSourceCephFSVolumeSource
    cinderCinderVolumeSourceCinderVolumeSource
    configMapConfigMapVolumeSourceConfigMapVolumeSource
    csiCSIVolumeSourceCSIVolumeSource
    downwardAPIDownwardAPIVolumeSourceDownwardAPIVolumeSource
    emptyDirEmptyDirVolumeSourceEmptyDirVolumeSource
    ephemeralEphemeralVolumeSourceEphemeralVolumeSource
    fcFCVolumeSourceFCVolumeSource
    flexVolumeFlexVolumeSourceFlexVolumeSource
    flockerFlockerVolumeSourceFlockerVolumeSource
    gcePersistentDiskGCEPersistentDiskVolumeSourceGCEPersistentDiskVolumeSource
    gitRepoGitRepoVolumeSourceGitRepoVolumeSource
    glusterfsGlusterfsVolumeSourceGlusterfsVolumeSource
    hostPathHostPathVolumeSourceHostPathVolumeSource
    iscsiISCSIVolumeSourceISCSIVolumeSource
    namestringstringname of the volume.
    Must be a DNS_LABEL and unique within the pod.
    More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
    nfsNFSVolumeSourceNFSVolumeSource
    persistentVolumeClaimPersistentVolumeClaimVolumeSourcePersistentVolumeClaimVolumeSource
    photonPersistentDiskPhotonPersistentDiskVolumeSourcePhotonPersistentDiskVolumeSource
    portworxVolumePortworxVolumeSourcePortworxVolumeSource
    projectedProjectedVolumeSourceProjectedVolumeSource
    quobyteQuobyteVolumeSourceQuobyteVolumeSource
    rbdRBDVolumeSourceRBDVolumeSource
    scaleIOScaleIOVolumeSourceScaleIOVolumeSource
    secretSecretVolumeSourceSecretVolumeSource
    storageosStorageOSVolumeSourceStorageOSVolumeSource
    vsphereVolumeVsphereVirtualDiskVolumeSourceVsphereVirtualDiskVolumeSource
    -

    VolumeDevice

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    devicePathstringstringdevicePath is the path inside of the container that the device will be mapped to.
    namestringstringname must match the name of a persistentVolumeClaim in the pod
    -

    VolumeMount

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    mountPathstringstringPath within the container at which the volume should be mounted. Must
    not contain ':'.
    mountPropagationMountPropagationModeMountPropagationMode
    namestringstringThis must match the Name of a Volume.
    readOnlybooleanboolMounted read-only if true, read-write otherwise (false or unspecified).
    Defaults to false.
    +optional
    subPathstringstringPath within the volume from which the container's volume should be mounted.
    Defaults to "" (volume's root).
    +optional
    subPathExprstringstringExpanded path within the volume from which the container's volume should be mounted.
    Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
    Defaults to "" (volume's root).
    SubPathExpr and SubPath are mutually exclusive.
    +optional
    -

    VolumeProjection

    -
    -

    Projection that may be projected along with other supported volume types

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    configMapConfigMapProjectionConfigMapProjection
    downwardAPIDownwardAPIProjectionDownwardAPIProjection
    secretSecretProjectionSecretProjection
    serviceAccountTokenServiceAccountTokenProjectionServiceAccountTokenProjection
    -

    VsphereVirtualDiskVolumeSource

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    fsTypestringstringfsType is filesystem type to mount.
    Must be a filesystem type supported by the host operating system.
    Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
    +optional
    storagePolicyIDstringstringstoragePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
    +optional
    storagePolicyNamestringstringstoragePolicyName is the storage Policy Based Management (SPBM) profile name.
    +optional
    volumePathstringstringvolumePath is the path that identifies vSphere volume vmdk
    -

    WeightedPodAffinityTerm

    -
    -

    The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)

    -
    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    podAffinityTermPodAffinityTermPodAffinityTerm
    weightint32 (formatted integer)int32weight associated with matching the corresponding podAffinityTerm,
    in the range 1-100.
    -

    WindowsSecurityContextOptions

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    gmsaCredentialSpecstringstringGMSACredentialSpec is where the GMSA admission webhook
    (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
    GMSA credential spec named by the GMSACredentialSpecName field.
    +optional
    gmsaCredentialSpecNamestringstringGMSACredentialSpecName is the name of the GMSA credential spec to use.
    +optional
    hostProcessbooleanboolHostProcess determines if a container should be run as a 'Host Process' container.
    This field is alpha-level and will only be honored by components that enable the
    WindowsHostProcessContainers feature flag. Setting this field without the feature
    flag will result in errors when validating the Pod. All of a Pod's containers must
    have the same effective HostProcess value (it is not allowed to have a mix of HostProcess
    containers and non-HostProcess containers). In addition, if HostProcess is true
    then HostNetwork must also be set to true.
    +optional
    runAsUserNamestringstringThe UserName in Windows to run the entrypoint of the container process.
    Defaults to the user specified in image metadata if unspecified.
    May also be set in PodSecurityContext. If set in both SecurityContext and
    PodSecurityContext, the value specified in SecurityContext takes precedence.
    +optional
    -

    Workflow

    -

    Properties

    - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeGo typeRequiredDefaultDescriptionExample
    metadataObjectMetaObjectMeta
    -

    ZipStrategy

    -
    -

    ZipStrategy will unzip zipped input artifacts

    -
    -

    interface{}

    - - - - -

    Comments

    - - +

    The API for an executor plugin. - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/faq/index.html b/faq/index.html index dba47dfccbc6..0be8f29de271 100644 --- a/faq/index.html +++ b/faq/index.html @@ -1,4029 +1,68 @@ - - - - - - - - - - - - - FAQ - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + FAQ - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - - - - +
    +
    +
    +
    - - - - - - - - -

    FAQ

    -

    "token not valid", "any bearer token is able to login in the UI or use the API"

    -

    You may not have configured Argo Server authentication correctly.

    -

    If you want SSO, try running with --auth-mode=sso. -If you're using --auth-mode=client, make sure you have Bearer in front of the ServiceAccount Secret, as mentioned in Access Token.

    -

    Learn more about the Argo Server set-up

    -

    Argo Server return EOF error

    -

    Since v3.0 the Argo Server listens for HTTPS requests, rather than HTTP. Try changing your URL to HTTPS, or start Argo Server using --secure=false.

    -

    My workflow hangs

    -

    Check your wait container logs:

    -

    Is there an RBAC error?

    -

    Learn more about workflow RBAC

    -

    Return "unknown (get pods)" error

    -

    You're probably getting a permission denied error because your RBAC is not configured.

    -

    Learn more about workflow RBAC and even more details

    -

    There is an error about /var/run/docker.sock

    -

    Try using a different container runtime executor.

    -

    Learn more about executors

    - - - - -

    Comments

    - - +

    FAQ - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/fields/index.html b/fields/index.html index 76d29e60a4b3..12e193189e7a 100644 --- a/fields/index.html +++ b/fields/index.html @@ -1,20605 +1,68 @@ - - - - - - - - - - - - - Field Reference - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Field Reference - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - -
    +
    - - - - - - - -
  • - - Prometheus Metrics - -
  • - - - - - - - - - -
  • - - Workflow Executors - -
  • - - - - - - - - - -
  • - - Workflow Restrictions - -
  • - - - - - - - - - -
  • - - Sidecar Injection - -
  • - - - - - - - - - -
  • - - Service Account Secrets - -
  • - - - - - - - - - - - - - - - - - -
  • - - - - - - - - - - -
  • - - - - - - - - - - -
  • - - - - - - - - - - -
  • - - - - - - - - - -
  • - - Windows Container Support - -
  • - - - - - - - - - - - - - - - - - - -
  • - - - - - - - - - - -
  • - - - - - - - - - - -
  • - - Roadmap - -
  • - - - - - - - - - - -
  • - - Blog - -
  • - - - - - - - - - - -
  • - - Slack - -
  • - - - - - - - - - - -
  • - - Twitter - -
  • - - - - - - - - - - -
  • - - LinkedIn - -
  • - - - - - -
    - - - - - -
    -
    -
    - - - -
    -
    -
    - - -
    -
    - - - - - - - - -

    Field Reference

    -

    Workflow

    -

    Workflow is the definition of a workflow resource

    -
    -Examples (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    apiVersionstringAPIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.io.k8s.community/contributors/devel/sig-architecture/api-conventions.md#resources
    kindstringKind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.io.k8s.community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
    metadataObjectMetaNo description available
    specWorkflowSpecNo description available
    statusWorkflowStatusNo description available
    -

    CronWorkflow

    -

    CronWorkflow is the definition of a scheduled workflow resource

    -
    -Examples (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    apiVersionstringAPIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.io.k8s.community/contributors/devel/sig-architecture/api-conventions.md#resources
    kindstringKind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.io.k8s.community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
    metadataObjectMetaNo description available
    specCronWorkflowSpecNo description available
    statusCronWorkflowStatusNo description available
    -

    WorkflowTemplate

    -

    WorkflowTemplate is the definition of a workflow template resource

    -
    -Examples (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    apiVersionstringAPIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.io.k8s.community/contributors/devel/sig-architecture/api-conventions.md#resources
    kindstringKind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.io.k8s.community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
    metadataObjectMetaNo description available
    specWorkflowSpecNo description available
    -

    WorkflowSpec

    -

    WorkflowSpec is the specification of a Workflow.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    activeDeadlineSecondsintegerOptional duration in seconds relative to the workflow start time which the workflow is allowed to run before the controller terminates the io.argoproj.workflow.v1alpha1. A value of zero is used to terminate a Running workflow
    affinityAffinityAffinity sets the scheduling constraints for all pods in the io.argoproj.workflow.v1alpha1. Can be overridden by an affinity specified in the template
    archiveLogsbooleanArchiveLogs indicates if the container logs should be archived
    argumentsArgumentsArguments contain the parameters and artifacts sent to the workflow entrypoint Parameters are referencable globally using the 'workflow' variable prefix. e.g. {{io.argoproj.workflow.v1alpha1.parameters.myparam}}
    artifactGCWorkflowLevelArtifactGCArtifactGC describes the strategy to use when deleting artifacts from completed or deleted workflows (applies to all output Artifacts unless Artifact.ArtifactGC is specified, which overrides this)
    artifactRepositoryRefArtifactRepositoryRefArtifactRepositoryRef specifies the configMap name and key containing the artifact repository config.
    automountServiceAccountTokenbooleanAutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods. ServiceAccountName of ExecutorConfig must be specified if this value is false.
    dnsConfigPodDNSConfigPodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.
    dnsPolicystringSet DNS policy for the pod. Defaults to "ClusterFirst". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.
    entrypointstringEntrypoint is a template reference to the starting point of the io.argoproj.workflow.v1alpha1.
    executorExecutorConfigExecutor holds configurations of executor containers of the io.argoproj.workflow.v1alpha1.
    hooksLifecycleHookHooks holds the lifecycle hook which is invoked at lifecycle of step, irrespective of the success, failure, or error status of the primary step
    hostAliasesArray<HostAlias>No description available
    hostNetworkbooleanHost networking requested for this workflow pod. Default to false.
    imagePullSecretsArray<LocalObjectReference>ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
    metricsMetricsMetrics are a list of metrics emitted from this Workflow
    nodeSelectorMap< string , string >NodeSelector is a selector which will result in all pods of the workflow to be scheduled on the selected node(s). This is able to be overridden by a nodeSelector specified in the template.
    onExitstringOnExit is a template reference which is invoked at the end of the workflow, irrespective of the success, failure, or error of the primary io.argoproj.workflow.v1alpha1.
    parallelismintegerParallelism limits the max total parallel pods that can execute at the same time in a workflow
    podDisruptionBudgetPodDisruptionBudgetSpecPodDisruptionBudget holds the number of concurrent disruptions that you allow for Workflow's Pods. Controller will automatically add the selector with workflow name, if selector is empty. Optional: Defaults to empty.
    podGCPodGCPodGC describes the strategy to use when deleting completed pods
    podMetadataMetadataPodMetadata defines additional metadata that should be applied to workflow pods
    ~~podPriority~~~~integer~~~~Priority to apply to workflow pods.~~ DEPRECATED: Use PodPriorityClassName instead.
    podPriorityClassNamestringPriorityClassName to apply to workflow pods.
    podSpecPatchstringPodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of container fields which are not strings (e.g. resource limits).
    priorityintegerPriority is used if controller is configured to process limited number of workflows in parallel. Workflows with higher priority are processed first.
    retryStrategyRetryStrategyRetryStrategy for all templates in the io.argoproj.workflow.v1alpha1.
    schedulerNamestringSet scheduler name for all pods. Will be overridden if container/script template's scheduler name is set. Default scheduler will be used if neither specified.
    securityContextPodSecurityContextSecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.
    serviceAccountNamestringServiceAccountName is the name of the ServiceAccount to run all pods of the workflow as.
    shutdownstringShutdown will shutdown the workflow according to its ShutdownStrategy
    suspendbooleanSuspend will suspend the workflow and prevent execution of any future steps in the workflow
    synchronizationSynchronizationSynchronization holds synchronization lock configuration for this Workflow
    templateDefaultsTemplateTemplateDefaults holds default template values that will apply to all templates in the Workflow, unless overridden on the template-level
    templatesArray<Template>Templates is a list of workflow templates used in a workflow
    tolerationsArray<Toleration>Tolerations to apply to workflow pods.
    ttlStrategyTTLStrategyTTLStrategy limits the lifetime of a Workflow that has finished execution depending on if it Succeeded or Failed. If this struct is set, once the Workflow finishes, it will be deleted after the time to live expires. If this field is unset, the controller config map will hold the default values.
    volumeClaimGCVolumeClaimGCVolumeClaimGC describes the strategy to use when deleting volumes from completed workflows
    volumeClaimTemplatesArray<PersistentVolumeClaim>VolumeClaimTemplates is a list of claims that containers are allowed to reference. The Workflow controller will create the claims at the beginning of the workflow and delete the claims upon completion of the workflow
    volumesArray<Volume>Volumes is a list of volumes that can be mounted by containers in a io.argoproj.workflow.v1alpha1.
    workflowMetadataWorkflowMetadataWorkflowMetadata contains some metadata of the workflow to refer to
    workflowTemplateRefWorkflowTemplateRefWorkflowTemplateRef holds a reference to a WorkflowTemplate for execution
    -

    WorkflowStatus

    -

    WorkflowStatus contains overall status information about a workflow

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    artifactGCStatusArtGCStatusArtifactGCStatus maintains the status of Artifact Garbage Collection
    artifactRepositoryRefArtifactRepositoryRefStatusArtifactRepositoryRef is used to cache the repository to use so we do not need to determine it everytime we reconcile.
    compressedNodesstringCompressed and base64 decoded Nodes map
    conditionsArray<Condition>Conditions is a list of conditions the Workflow may have
    estimatedDurationintegerEstimatedDuration in seconds.
    finishedAtTimeTime at which this workflow completed
    messagestringA human readable message indicating details about why the workflow is in this condition.
    nodesNodeStatusNodes is a mapping between a node ID and the node's status.
    offloadNodeStatusVersionstringWhether on not node status has been offloaded to a database. If exists, then Nodes and CompressedNodes will be empty. This will actually be populated with a hash of the offloaded data.
    outputsOutputsOutputs captures output values and artifact locations produced by the workflow via global outputs
    persistentVolumeClaimsArray<Volume>PersistentVolumeClaims tracks all PVCs that were created as part of the io.argoproj.workflow.v1alpha1. The contents of this list are drained at the end of the workflow.
    phasestringPhase a simple, high-level summary of where the workflow is in its lifecycle. Will be "" (Unknown), "Pending", or "Running" before the workflow is completed, and "Succeeded", "Failed" or "Error" once the workflow has completed.
    progressstringProgress to completion
    resourcesDurationMap< integer , int64 >ResourcesDuration is the total for the workflow
    startedAtTimeTime at which this workflow started
    storedTemplatesTemplateStoredTemplates is a mapping between a template ref and the node's status.
    storedWorkflowTemplateSpecWorkflowSpecStoredWorkflowSpec stores the WorkflowTemplate spec for future execution.
    synchronizationSynchronizationStatusSynchronization stores the status of synchronization locks
    taskResultsCompletedMap< boolean , string >Have task results been completed? (mapped by Pod name) used to prevent premature garbage collection of artifacts.
    -

    CronWorkflowSpec

    -

    CronWorkflowSpec is the specification of a CronWorkflow

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    concurrencyPolicystringConcurrencyPolicy is the K8s-style concurrency policy that will be used
    failedJobsHistoryLimitintegerFailedJobsHistoryLimit is the number of failed jobs to be kept at a time
    schedulestringSchedule is a schedule to run the Workflow in Cron format
    startingDeadlineSecondsintegerStartingDeadlineSeconds is the K8s-style deadline that will limit the time a CronWorkflow will be run after its original scheduled time if it is missed.
    successfulJobsHistoryLimitintegerSuccessfulJobsHistoryLimit is the number of successful jobs to be kept at a time
    suspendbooleanSuspend is a flag that will stop new CronWorkflows from running if set to true
    timezonestringTimezone is the timezone against which the cron schedule will be calculated, e.g. "Asia/Tokyo". Default is machine's local time.
    workflowMetadataObjectMetaWorkflowMetadata contains some metadata of the workflow to be run
    workflowSpecWorkflowSpecWorkflowSpec is the spec of the workflow to be run
    -

    CronWorkflowStatus

    -

    CronWorkflowStatus is the status of a CronWorkflow

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    activeArray<ObjectReference>Active is a list of active workflows stemming from this CronWorkflow
    conditionsArray<Condition>Conditions is a list of conditions the CronWorkflow may have
    lastScheduledTimeTimeLastScheduleTime is the last time the CronWorkflow was scheduled
    -

    Arguments

    -

    Arguments to a template

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    artifactsArray<Artifact>Artifacts is the list of artifacts to pass to the template or workflow
    parametersArray<Parameter>Parameters is the list of parameters to pass to the template or workflow
    -

    WorkflowLevelArtifactGC

    -

    WorkflowLevelArtifactGC describes how to delete artifacts from completed Workflows - this spec is used on the Workflow level

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    forceFinalizerRemovalbooleanForceFinalizerRemoval: if set to true, the finalizer will be removed in the case that Artifact GC fails
    podMetadataMetadataPodMetadata is an optional field for specifying the Labels and Annotations that should be assigned to the Pod doing the deletion
    podSpecPatchstringPodSpecPatch holds strategic merge patch to apply against the artgc pod spec.
    serviceAccountNamestringServiceAccountName is an optional field for specifying the Service Account that should be assigned to the Pod doing the deletion
    strategystringStrategy is the strategy to use.
    -

    ArtifactRepositoryRef

    -

    No description available

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    configMapstringThe name of the config map. Defaults to "artifact-repositories".
    keystringThe config map key. Defaults to the value of the "workflows.argoproj.io/default-artifact-repository" annotation.
    -

    ExecutorConfig

    -

    ExecutorConfig holds configurations of an executor container.

    -

    Fields

    - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    serviceAccountNamestringServiceAccountName specifies the service account name of the executor container.
    -

    LifecycleHook

    -

    No description available

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    argumentsArgumentsArguments hold arguments to the template
    expressionstringExpression is a condition expression for when a node will be retried. If it evaluates to false, the node will not be retried and the retry strategy will be ignored
    templatestringTemplate is the name of the template to execute by the hook
    templateRefTemplateRefTemplateRef is the reference to the template resource to execute by the hook
    -

    Metrics

    -

    Metrics are a list of metrics emitted from a Workflow/Template

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    prometheusArray<Prometheus>Prometheus is a list of prometheus metrics to be emitted
    -

    PodGC

    -

    PodGC describes how to delete completed pods as they complete

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    deleteDelayDurationDurationDeleteDelayDuration specifies the duration before pods in the GC queue get deleted.
    labelSelectorLabelSelectorLabelSelector is the label selector to check if the pods match the labels before being added to the pod GC queue.
    strategystringStrategy is the strategy to use. One of "OnPodCompletion", "OnPodSuccess", "OnWorkflowCompletion", "OnWorkflowSuccess". If unset, does not delete Pods
    -

    Metadata

    -

    Pod metdata

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    annotationsMap< string , string >No description available
    labelsMap< string , string >No description available
    -

    RetryStrategy

    -

    RetryStrategy provides controls on how to retry a workflow step

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    affinityRetryAffinityAffinity prevents running workflow's step on the same host
    backoffBackoffBackoff is a backoff strategy
    expressionstringExpression is a condition expression for when a node will be retried. If it evaluates to false, the node will not be retried and the retry strategy will be ignored
    limitIntOrStringLimit is the maximum number of retry attempts when retrying a container. It does not include the original container; the maximum number of total attempts will be limit + 1.
    retryPolicystringRetryPolicy is a policy of NodePhase statuses that will be retried
    -

    Synchronization

    -

    Synchronization holds synchronization lock configuration

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    mutexMutexMutex holds the Mutex lock details
    semaphoreSemaphoreRefSemaphore holds the Semaphore configuration
    -

    Template

    -

    Template is a reusable and composable unit of execution in a workflow

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    activeDeadlineSecondsIntOrStringOptional duration in seconds relative to the StartTime that the pod may be active on a node before the system actively tries to terminate the pod; value must be positive integer This field is only applicable to container and script templates.
    affinityAffinityAffinity sets the pod's scheduling constraints Overrides the affinity set at the workflow level (if any)
    archiveLocationArtifactLocationLocation in which all files related to the step will be stored (logs, artifacts, etc...). Can be overridden by individual items in Outputs. If omitted, will use the default artifact repository location configured in the controller, appended with the / in the key.
    automountServiceAccountTokenbooleanAutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods. ServiceAccountName of ExecutorConfig must be specified if this value is false.
    containerContainerContainer is the main container image to run in the pod
    containerSetContainerSetTemplateContainerSet groups multiple containers within a single pod.
    daemonbooleanDaemon will allow a workflow to proceed to the next step so long as the container reaches readiness
    dagDAGTemplateDAG template subtype which runs a DAG
    dataDataData is a data template
    executorExecutorConfigExecutor holds configurations of the executor container.
    failFastbooleanFailFast, if specified, will fail this template if any of its child pods has failed. This is useful for when this template is expanded with withItems, etc.
    hostAliasesArray<HostAlias>HostAliases is an optional list of hosts and IPs that will be injected into the pod spec
    httpHTTPHTTP makes a HTTP request
    initContainersArray<UserContainer>InitContainers is a list of containers which run before the main container.
    inputsInputsInputs describe what inputs parameters and artifacts are supplied to this template
    memoizeMemoizeMemoize allows templates to use outputs generated from already executed templates
    metadataMetadataMetdata sets the pods's metadata, i.e. annotations and labels
    metricsMetricsMetrics are a list of metrics emitted from this template
    namestringName is the name of the template
    nodeSelectorMap< string , string >NodeSelector is a selector to schedule this step of the workflow to be run on the selected node(s). Overrides the selector set at the workflow level.
    outputsOutputsOutputs describe the parameters and artifacts that this template produces
    parallelismintegerParallelism limits the max total parallel pods that can execute at the same time within the boundaries of this template invocation. If additional steps/dag templates are invoked, the pods created by those templates will not be counted towards this total.
    pluginPluginPlugin is a plugin template
    podSpecPatchstringPodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of container fields which are not strings (e.g. resource limits).
    priorityintegerPriority to apply to workflow pods.
    priorityClassNamestringPriorityClassName to apply to workflow pods.
    resourceResourceTemplateResource template subtype which can run k8s resources
    retryStrategyRetryStrategyRetryStrategy describes how to retry a template when it fails
    schedulerNamestringIf specified, the pod will be dispatched by specified scheduler. Or it will be dispatched by workflow scope scheduler if specified. If neither specified, the pod will be dispatched by default scheduler.
    scriptScriptTemplateScript runs a portion of code against an interpreter
    securityContextPodSecurityContextSecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.
    serviceAccountNamestringServiceAccountName to apply to workflow pods
    sidecarsArray<UserContainer>Sidecars is a list of containers which run alongside the main container Sidecars are automatically killed when the main container completes
    stepsArray<Array<WorkflowStep>>Steps define a series of sequential/parallel workflow steps
    suspendSuspendTemplateSuspend template subtype which can suspend a workflow when reaching the step
    synchronizationSynchronizationSynchronization holds synchronization lock configuration for this template
    timeoutstringTimeout allows to set the total node execution timeout duration counting from the node's start time. This duration also includes time in which the node spends in Pending state. This duration may not be applied to Step or DAG templates.
    tolerationsArray<Toleration>Tolerations to apply to workflow pods.
    volumesArray<Volume>Volumes is a list of volumes that can be mounted by containers in a template.
    -

    TTLStrategy

    -

    TTLStrategy is the strategy for the time to live depending on if the workflow succeeded or failed

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    secondsAfterCompletionintegerSecondsAfterCompletion is the number of seconds to live after completion
    secondsAfterFailureintegerSecondsAfterFailure is the number of seconds to live after failure
    secondsAfterSuccessintegerSecondsAfterSuccess is the number of seconds to live after success
    -

    VolumeClaimGC

    -

    VolumeClaimGC describes how to delete volumes from completed Workflows

    -

    Fields

    - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    strategystringStrategy is the strategy to use. One of "OnWorkflowCompletion", "OnWorkflowSuccess". Defaults to "OnWorkflowSuccess"
    -

    WorkflowMetadata

    -

    No description available

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    annotationsMap< string , string >No description available
    labelsMap< string , string >No description available
    labelsFromLabelValueFromNo description available
    -

    WorkflowTemplateRef

    -

    WorkflowTemplateRef is a reference to a WorkflowTemplate resource.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    clusterScopebooleanClusterScope indicates the referred template is cluster scoped (i.e. a ClusterWorkflowTemplate).
    namestringName is the resource name of the workflow template.
    -

    ArtGCStatus

    -

    ArtGCStatus maintains state related to ArtifactGC

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    notSpecifiedbooleanif this is true, we already checked to see if we need to do it and we don't
    podsRecoupedMap< boolean , string >have completed Pods been processed? (mapped by Pod name) used to prevent re-processing the Status of a Pod more than once
    strategiesProcessedMap< boolean , string >have Pods been started to perform this strategy? (enables us not to re-process what we've already done)
    -

    ArtifactRepositoryRefStatus

    -

    No description available

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    artifactRepositoryArtifactRepositoryThe repository the workflow will use. This maybe empty before v3.1.
    configMapstringThe name of the config map. Defaults to "artifact-repositories".
    defaultbooleanIf this ref represents the default artifact repository, rather than a config map.
    keystringThe config map key. Defaults to the value of the "workflows.argoproj.io/default-artifact-repository" annotation.
    namespacestringThe namespace of the config map. Defaults to the workflow's namespace, or the controller's namespace (if found).
    -

    Condition

    -

    No description available

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    messagestringMessage is the condition message
    statusstringStatus is the status of the condition
    typestringType is the type of condition
    -

    NodeStatus

    -

    NodeStatus contains status information about an individual node in the workflow

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    boundaryIDstringBoundaryID indicates the node ID of the associated template root node in which this node belongs to
    childrenArray< string >Children is a list of child node IDs
    daemonedbooleanDaemoned tracks whether or not this node was daemoned and need to be terminated
    displayNamestringDisplayName is a human readable representation of the node. Unique within a template boundary
    estimatedDurationintegerEstimatedDuration in seconds.
    finishedAtTimeTime at which this node completed
    hostNodeNamestringHostNodeName name of the Kubernetes node on which the Pod is running, if applicable
    idstringID is a unique identifier of a node within the workflow. It is implemented as a hash of the node name, which makes the ID deterministic
    inputsInputsInputs captures input parameter values and artifact locations supplied to this template invocation
    memoizationStatusMemoizationStatusMemoizationStatus holds information about cached nodes
    messagestringA human readable message indicating details about why the node is in this condition.
    namestringName is unique name in the node tree used to generate the node ID
    nodeFlagNodeFlagNodeFlag tracks some history of node. e.g.) hooked, retried, etc.
    outboundNodesArray< string >OutboundNodes tracks the node IDs which are considered "outbound" nodes to a template invocation. For every invocation of a template, there are nodes which we considered as "outbound". Essentially, these are last nodes in the execution sequence to run, before the template is considered completed. These nodes are then connected as parents to a following step. In the case of single pod steps (i.e. container, script, resource templates), this list will be nil since the pod itself is already considered the "outbound" node. In the case of DAGs, outbound nodes are the "target" tasks (tasks with no children). In the case of steps, outbound nodes are all the containers involved in the last step group. NOTE: since templates are composable, the list of outbound nodes are carried upwards when a DAG/steps template invokes another DAG/steps template. In other words, the outbound nodes of a template, will be a superset of the outbound nodes of its last children.
    outputsOutputsOutputs captures output parameter values and artifact locations produced by this template invocation
    phasestringPhase a simple, high-level summary of where the node is in its lifecycle. Can be used as a state machine. Will be one of these values "Pending", "Running" before the node is completed, or "Succeeded", "Skipped", "Failed", "Error", or "Omitted" as a final state.
    podIPstringPodIP captures the IP of the pod for daemoned steps
    progressstringProgress to completion
    resourcesDurationMap< integer , int64 >ResourcesDuration is indicative, but not accurate, resource duration. This is populated when the node completes.
    startedAtTimeTime at which this node started
    synchronizationStatusNodeSynchronizationStatusSynchronizationStatus is the synchronization status of the node
    templateNamestringTemplateName is the template name which this node corresponds to. Not applicable to virtual nodes (e.g. Retry, StepGroup)
    templateRefTemplateRefTemplateRef is the reference to the template resource which this node corresponds to. Not applicable to virtual nodes (e.g. Retry, StepGroup)
    templateScopestringTemplateScope is the template scope in which the template of this node was retrieved.
    typestringType indicates type of node
    -

    Outputs

    -

    Outputs hold parameters, artifacts, and results from a step

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    artifactsArray<Artifact>Artifacts holds the list of output artifacts produced by a step
    exitCodestringExitCode holds the exit code of a script template
    parametersArray<Parameter>Parameters holds the list of output parameters produced by a step
    resultstringResult holds the result (stdout) of a script template
    -

    SynchronizationStatus

    -

    SynchronizationStatus stores the status of semaphore and mutex.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    mutexMutexStatusMutex stores this workflow's mutex holder details
    semaphoreSemaphoreStatusSemaphore stores this workflow's Semaphore holder details
    -

    Artifact

    -

    Artifact indicates an artifact to place at a specified path

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    archiveArchiveStrategyArchive controls how the artifact will be saved to the artifact repository.
    archiveLogsbooleanArchiveLogs indicates if the container logs should be archived
    artifactGCArtifactGCArtifactGC describes the strategy to use when deleting an artifact from completed or deleted workflows
    artifactoryArtifactoryArtifactArtifactory contains artifactory artifact location details
    azureAzureArtifactAzure contains Azure Storage artifact location details
    deletedbooleanHas this been deleted?
    fromstringFrom allows an artifact to reference an artifact from a previous step
    fromExpressionstringFromExpression, if defined, is evaluated to specify the value for the artifact
    gcsGCSArtifactGCS contains GCS artifact location details
    gitGitArtifactGit contains git artifact location details
    globalNamestringGlobalName exports an output artifact to the global scope, making it available as '{{io.argoproj.workflow.v1alpha1.outputs.artifacts.XXXX}} and in workflow.status.outputs.artifacts
    hdfsHDFSArtifactHDFS contains HDFS artifact location details
    httpHTTPArtifactHTTP contains HTTP artifact location details
    modeintegermode bits to use on this file, must be a value between 0 and 0777 set when loading input artifacts.
    namestringname of the artifact. must be unique within a template's inputs/outputs.
    optionalbooleanMake Artifacts optional, if Artifacts doesn't generate or exist
    ossOSSArtifactOSS contains OSS artifact location details
    pathstringPath is the container path to the artifact
    rawRawArtifactRaw contains raw artifact location details
    recurseModebooleanIf mode is set, apply the permission recursively into the artifact if it is a folder
    s3S3ArtifactS3 contains S3 artifact location details
    subPathstringSubPath allows an artifact to be sourced from a subpath within the specified source
    -

    Parameter

    -

    Parameter indicates a passed string parameter to a service template with an optional default value

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    defaultstringDefault is the default value to use for an input parameter if a value was not supplied
    descriptionstringDescription is the parameter description
    enumArray< string >Enum holds a list of string values to choose from, for the actual value of the parameter
    globalNamestringGlobalName exports an output parameter to the global scope, making it available as '{{io.argoproj.workflow.v1alpha1.outputs.parameters.XXXX}} and in workflow.status.outputs.parameters
    namestringName is the parameter name
    valuestringValue is the literal value to use for the parameter. If specified in the context of an input parameter, the value takes precedence over any passed values
    valueFromValueFromValueFrom is the source for the output parameter's value
    -

    TemplateRef

    -

    TemplateRef is a reference of template resource.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    clusterScopebooleanClusterScope indicates the referred template is cluster scoped (i.e. a ClusterWorkflowTemplate).
    namestringName is the resource name of the template.
    templatestringTemplate is the name of referred template in the resource.
    -

    Prometheus

    -

    Prometheus is a prometheus metric to be emitted

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    counterCounterCounter is a counter metric
    gaugeGaugeGauge is a gauge metric
    helpstringHelp is a string that describes the metric
    histogramHistogramHistogram is a histogram metric
    labelsArray<MetricLabel>Labels is a list of metric labels
    namestringName is the name of the metric
    whenstringWhen is a conditional statement that decides when to emit the metric
    -

    RetryAffinity

    -

    RetryAffinity prevents running steps on the same host.

    -

    Fields

    - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    nodeAntiAffinityRetryNodeAntiAffinityNo description available
    -

    Backoff

    -

    Backoff is a backoff strategy to use within retryStrategy

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    durationstringDuration is the amount to back off. Default unit is seconds, but could also be a duration (e.g. "2m", "1h")
    factorIntOrStringFactor is a factor to multiply the base duration after each failed retry
    maxDurationstringMaxDuration is the maximum amount of time allowed for a workflow in the backoff strategy
    -

    Mutex

    -

    Mutex holds Mutex configuration

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    namestringname of the mutex
    namespacestringNamespace is the namespace of the mutex, default: [namespace of workflow]
    -

    SemaphoreRef

    -

    SemaphoreRef is a reference of Semaphore

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    configMapKeyRefConfigMapKeySelectorConfigMapKeyRef is configmap selector for Semaphore configuration
    namespacestringNamespace is the namespace of the configmap, default: [namespace of workflow]
    -

    ArtifactLocation

    -

    ArtifactLocation describes a location for a single or multiple artifacts. It is used as single artifact in the context of inputs/outputs (e.g. outputs.artifacts.artname). It is also used to describe the location of multiple artifacts such as the archive location of a single workflow step, which the executor will use as a default location to store its files.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    archiveLogsbooleanArchiveLogs indicates if the container logs should be archived
    artifactoryArtifactoryArtifactArtifactory contains artifactory artifact location details
    azureAzureArtifactAzure contains Azure Storage artifact location details
    gcsGCSArtifactGCS contains GCS artifact location details
    gitGitArtifactGit contains git artifact location details
    hdfsHDFSArtifactHDFS contains HDFS artifact location details
    httpHTTPArtifactHTTP contains HTTP artifact location details
    ossOSSArtifactOSS contains OSS artifact location details
    rawRawArtifactRaw contains raw artifact location details
    s3S3ArtifactS3 contains S3 artifact location details
    -

    ContainerSetTemplate

    -

    No description available

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    containersArray<ContainerNode>No description available
    retryStrategyContainerSetRetryStrategyRetryStrategy describes how to retry container nodes in the container set if they fail. Number of retries (default 0) and sleep duration between retries (default 0s, instant retry) can be set.
    volumeMountsArray<VolumeMount>No description available
    -

    DAGTemplate

    -

    DAGTemplate is a template subtype for directed acyclic graph templates

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    failFastbooleanThis flag is for DAG logic. The DAG logic has a built-in "fail fast" feature to stop scheduling new steps, as soon as it detects that one of the DAG nodes is failed. Then it waits until all DAG nodes are completed before failing the DAG itself. The FailFast flag default is true, if set to false, it will allow a DAG to run all branches of the DAG to completion (either success or failure), regardless of the failed outcomes of branches in the DAG. More info and example about this feature at https://github.com/argoproj/argo-workflows/issues/1442
    targetstringTarget are one or more names of targets to execute in a DAG
    tasksArray<DAGTask>Tasks are a list of DAG tasks
    -

    Data

    -

    Data is a data template

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    sourceDataSourceSource sources external data into a data template
    transformationArray<TransformationStep>Transformation applies a set of transformations
    -

    HTTP

    -

    No description available

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    bodystringBody is content of the HTTP Request
    bodyFromHTTPBodySourceBodyFrom is content of the HTTP Request as Bytes
    headersArray<HTTPHeader>Headers are an optional list of headers to send with HTTP requests
    insecureSkipVerifybooleanInsecureSkipVerify is a bool when if set to true will skip TLS verification for the HTTP client
    methodstringMethod is HTTP methods for HTTP Request
    successConditionstringSuccessCondition is an expression if evaluated to true is considered successful
    timeoutSecondsintegerTimeoutSeconds is request timeout for HTTP Request. Default is 30 seconds
    urlstringURL of the HTTP Request
    -

    UserContainer

    -

    UserContainer is a container specified by a user.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    argsArray< string >Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
    commandArray< string >Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
    envArray<EnvVar>List of environment variables to set in the container. Cannot be updated.
    envFromArray<EnvFromSource>List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.
    imagestringContainer image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.
    imagePullPolicystringImage pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
    lifecycleLifecycleActions that the management system should take in response to container lifecycle events. Cannot be updated.
    livenessProbeProbePeriodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
    mirrorVolumeMountsbooleanMirrorVolumeMounts will mount the same volumes specified in the main container to the container (including artifacts), at the same mountPaths. This enables dind daemon to partially see the same filesystem as the main container in order to use features such as docker volume binding
    namestringName of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
    portsArray<ContainerPort>List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
    readinessProbeProbePeriodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
    resourcesResourceRequirementsCompute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    securityContextSecurityContextSecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
    startupProbeProbeStartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
    stdinbooleanWhether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.
    stdinOncebooleanWhether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false
    terminationMessagePathstringOptional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.
    terminationMessagePolicystringIndicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.
    ttybooleanWhether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.
    volumeDevicesArray<VolumeDevice>volumeDevices is the list of block devices to be used by the container.
    volumeMountsArray<VolumeMount>Pod volumes to mount into the container's filesystem. Cannot be updated.
    workingDirstringContainer's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.
    -

    Inputs

    -

    Inputs are the mechanism for passing parameters, artifacts, volumes from one template to another

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    artifactsArray<Artifact>Artifact are a list of artifacts passed as inputs
    parametersArray<Parameter>Parameters are a list of parameters passed as inputs
    -

    Memoize

    -

    Memoization enables caching for the Outputs of the template

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    cacheCacheCache sets and configures the kind of cache
    keystringKey is the key to use as the caching key
    maxAgestringMaxAge is the maximum age (e.g. "180s", "24h") of an entry that is still considered valid. If an entry is older than the MaxAge, it will be ignored.
    -

    Plugin

    -

    Plugin is an Object with exactly one key

    -

    ResourceTemplate

    -

    ResourceTemplate is a template subtype to manipulate kubernetes resources

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    actionstringAction is the action to perform to the resource. Must be one of: get, create, apply, delete, replace, patch
    failureConditionstringFailureCondition is a label selector expression which describes the conditions of the k8s resource in which the step was considered failed
    flagsArray< string >Flags is a set of additional options passed to kubectl before submitting a resource I.e. to disable resource validation: flags: [ "--validate=false" # disable resource validation ]
    manifeststringManifest contains the kubernetes manifest
    manifestFromManifestFromManifestFrom is the source for a single kubernetes manifest
    mergeStrategystringMergeStrategy is the strategy used to merge a patch. It defaults to "strategic". Must be one of: strategic, merge, json
    setOwnerReferencebooleanSetOwnerReference sets the reference to the workflow on the OwnerReference of generated resource.
    successConditionstringSuccessCondition is a label selector expression which describes the conditions of the k8s resource in which it is acceptable to proceed to the following step
    -

    ScriptTemplate

    -

    ScriptTemplate is a template subtype to enable scripting through code steps

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    argsArray< string >Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
    commandArray< string >Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
    envArray<EnvVar>List of environment variables to set in the container. Cannot be updated.
    envFromArray<EnvFromSource>List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.
    imagestringContainer image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.
    imagePullPolicystringImage pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
    lifecycleLifecycleActions that the management system should take in response to container lifecycle events. Cannot be updated.
    livenessProbeProbePeriodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
    namestringName of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
    portsArray<ContainerPort>List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
    readinessProbeProbePeriodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
    resourcesResourceRequirementsCompute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    securityContextSecurityContextSecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
    sourcestringSource contains the source code of the script to execute
    startupProbeProbeStartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
    stdinbooleanWhether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.
    stdinOncebooleanWhether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false
    terminationMessagePathstringOptional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.
    terminationMessagePolicystringIndicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.
    ttybooleanWhether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.
    volumeDevicesArray<VolumeDevice>volumeDevices is the list of block devices to be used by the container.
    volumeMountsArray<VolumeMount>Pod volumes to mount into the container's filesystem. Cannot be updated.
    workingDirstringContainer's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.
    -

    WorkflowStep

    -

    WorkflowStep is a reference to a template to execute in a series of steps

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    argumentsArgumentsArguments hold arguments to the template
    continueOnContinueOnContinueOn makes argo proceed with the following step even if this step fails. Errors and Failed states can be specified
    hooksLifecycleHookHooks holds the lifecycle hook which is invoked at lifecycle of step, irrespective of the success, failure, or error status of the primary step
    inlineTemplateInline is the template. Template must be empty if this is declared (and vice-versa).
    namestringName of the step
    ~~onExit~~~~string~~~~OnExit is a template reference which is invoked at the end of the template, irrespective of the success, failure, or error of the primary template.~~ DEPRECATED: Use Hooks[exit].Template instead.
    templatestringTemplate is the name of the template to execute as the step
    templateRefTemplateRefTemplateRef is the reference to the template resource to execute as the step.
    whenstringWhen is an expression in which the step should conditionally execute
    withItemsArray<Item>WithItems expands a step into multiple parallel steps from the items in the list
    withParamstringWithParam expands a step into multiple parallel steps from the value in the parameter, which is expected to be a JSON list.
    withSequenceSequenceWithSequence expands a step into a numeric sequence
    -

    SuspendTemplate

    -

    SuspendTemplate is a template subtype to suspend a workflow at a predetermined point in time

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    durationstringDuration is the seconds to wait before automatically resuming a template. Must be a string. Default unit is seconds. Could also be a Duration, e.g.: "2m", "6h"
    -

    LabelValueFrom

    -

    No description available

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    expressionstringNo description available
    -

    ArtifactRepository

    -

    ArtifactRepository represents an artifact repository in which a controller will store its artifacts

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    archiveLogsbooleanArchiveLogs enables log archiving
    artifactoryArtifactoryArtifactRepositoryArtifactory stores artifacts to JFrog Artifactory
    azureAzureArtifactRepositoryAzure stores artifact in an Azure Storage account
    gcsGCSArtifactRepositoryGCS stores artifact in a GCS object store
    hdfsHDFSArtifactRepositoryHDFS stores artifacts in HDFS
    ossOSSArtifactRepositoryOSS stores artifact in an OSS-compliant object store
    s3S3ArtifactRepositoryS3 stores artifact in a S3-compliant object store
    -

    MemoizationStatus

    -

    MemoizationStatus is the status of this memoized node

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    cacheNamestringCache is the name of the cache that was used
    hitbooleanHit indicates whether this node was created from a cache entry
    keystringKey is the name of the key used for this node's cache
    -

    NodeFlag

    -

    No description available

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    hookedbooleanHooked tracks whether or not this node was triggered by hook or onExit
    retriedbooleanRetried tracks whether or not this node was retried by retryStrategy
    -

    NodeSynchronizationStatus

    -

    NodeSynchronizationStatus stores the status of a node

    -

    Fields

    - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    waitingstringWaiting is the name of the lock that this node is waiting for
    -

    MutexStatus

    -

    MutexStatus contains which objects hold mutex locks, and which objects this workflow is waiting on to release locks.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    holdingArray<MutexHolding>Holding is a list of mutexes and their respective objects that are held by mutex lock for this io.argoproj.workflow.v1alpha1.
    waitingArray<MutexHolding>Waiting is a list of mutexes and their respective objects this workflow is waiting for.
    -

    SemaphoreStatus

    -

    No description available

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    holdingArray<SemaphoreHolding>Holding stores the list of resource acquired synchronization lock for workflows.
    waitingArray<SemaphoreHolding>Waiting indicates the list of current synchronization lock holders.
    -

    ArchiveStrategy

    -

    ArchiveStrategy describes how to archive files/directory when saving artifacts

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    noneNoneStrategyNo description available
    tarTarStrategyNo description available
    zipZipStrategyNo description available
    -

    ArtifactGC

    -

    ArtifactGC describes how to delete artifacts from completed Workflows - this is embedded into the WorkflowLevelArtifactGC, and also used for individual Artifacts to override that as needed

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    podMetadataMetadataPodMetadata is an optional field for specifying the Labels and Annotations that should be assigned to the Pod doing the deletion
    serviceAccountNamestringServiceAccountName is an optional field for specifying the Service Account that should be assigned to the Pod doing the deletion
    strategystringStrategy is the strategy to use.
    -

    ArtifactoryArtifact

    -

    ArtifactoryArtifact is the location of an artifactory artifact

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    passwordSecretSecretKeySelectorPasswordSecret is the secret selector to the repository password
    urlstringURL of the artifact
    usernameSecretSecretKeySelectorUsernameSecret is the secret selector to the repository username
    -

    AzureArtifact

    -

    AzureArtifact is the location of an Azure Storage artifact

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    accountKeySecretSecretKeySelectorAccountKeySecret is the secret selector to the Azure Blob Storage account access key
    blobstringBlob is the blob name (i.e., path) in the container where the artifact resides
    containerstringContainer is the container where resources will be stored
    endpointstringEndpoint is the service url associated with an account. It is most likely "https://.blob.core.windows.net"
    useSDKCredsbooleanUseSDKCreds tells the driver to figure out credentials based on sdk defaults.
    -

    GCSArtifact

    -

    GCSArtifact is the location of a GCS artifact

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    bucketstringBucket is the name of the bucket
    keystringKey is the path in the bucket where the artifact resides
    serviceAccountKeySecretSecretKeySelectorServiceAccountKeySecret is the secret selector to the bucket's service account key
    -

    GitArtifact

    -

    GitArtifact is the location of a git artifact

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    branchstringBranch is the branch to fetch when SingleBranch is enabled
    depthintegerDepth specifies clones/fetches should be shallow and include the given number of commits from the branch tip
    disableSubmodulesbooleanDisableSubmodules disables submodules during git clone
    fetchArray< string >Fetch specifies a number of refs that should be fetched before checkout
    insecureIgnoreHostKeybooleanInsecureIgnoreHostKey disables SSH strict host key checking during git clone
    passwordSecretSecretKeySelectorPasswordSecret is the secret selector to the repository password
    repostringRepo is the git repository
    revisionstringRevision is the git commit, tag, branch to checkout
    singleBranchbooleanSingleBranch enables single branch clone, using the branch parameter
    sshPrivateKeySecretSecretKeySelectorSSHPrivateKeySecret is the secret selector to the repository ssh private key
    usernameSecretSecretKeySelectorUsernameSecret is the secret selector to the repository username
    -

    HDFSArtifact

    -

    HDFSArtifact is the location of an HDFS artifact

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    addressesArray< string >Addresses is accessible addresses of HDFS name nodes
    forcebooleanForce copies a file forcibly even if it exists
    hdfsUserstringHDFSUser is the user to access HDFS file system. It is ignored if either ccache or keytab is used.
    krbCCacheSecretSecretKeySelectorKrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.
    krbConfigConfigMapConfigMapKeySelectorKrbConfig is the configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.
    krbKeytabSecretSecretKeySelectorKrbKeytabSecret is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.
    krbRealmstringKrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.
    krbServicePrincipalNamestringKrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.
    krbUsernamestringKrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.
    pathstringPath is a file path in HDFS
    -

    HTTPArtifact

    -

    HTTPArtifact allows a file served on HTTP to be placed as an input artifact in a container

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    authHTTPAuthAuth contains information for client authentication
    headersArray<Header>Headers are an optional list of headers to send with HTTP requests for artifacts
    urlstringURL of the artifact
    -

    OSSArtifact

    -

    OSSArtifact is the location of an Alibaba Cloud OSS artifact

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    accessKeySecretSecretKeySelectorAccessKeySecret is the secret selector to the bucket's access key
    bucketstringBucket is the name of the bucket
    createBucketIfNotPresentbooleanCreateBucketIfNotPresent tells the driver to attempt to create the OSS bucket for output artifacts, if it doesn't exist
    endpointstringEndpoint is the hostname of the bucket endpoint
    keystringKey is the path in the bucket where the artifact resides
    lifecycleRuleOSSLifecycleRuleLifecycleRule specifies how to manage bucket's lifecycle
    secretKeySecretSecretKeySelectorSecretKeySecret is the secret selector to the bucket's secret key
    securityTokenstringSecurityToken is the user's temporary security token. For more details, check out: https://www.alibabacloud.com/help/doc-detail/100624.htm
    useSDKCredsbooleanUseSDKCreds tells the driver to figure out credentials based on sdk defaults.
    -

    RawArtifact

    -

    RawArtifact allows raw string content to be placed as an artifact in a container

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    datastringData is the string contents of the artifact
    -

    S3Artifact

    -

    S3Artifact is the location of an S3 artifact

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    accessKeySecretSecretKeySelectorAccessKeySecret is the secret selector to the bucket's access key
    bucketstringBucket is the name of the bucket
    caSecretSecretKeySelectorCASecret specifies the secret that contains the CA, used to verify the TLS connection
    createBucketIfNotPresentCreateS3BucketOptionsCreateBucketIfNotPresent tells the driver to attempt to create the S3 bucket for output artifacts, if it doesn't exist. Setting Enabled Encryption will apply either SSE-S3 to the bucket if KmsKeyId is not set or SSE-KMS if it is.
    encryptionOptionsS3EncryptionOptionsNo description available
    endpointstringEndpoint is the hostname of the bucket endpoint
    insecurebooleanInsecure will connect to the service without TLS
    keystringKey is the key in the bucket where the artifact resides
    regionstringRegion contains the optional bucket region
    roleARNstringRoleARN is the Amazon Resource Name (ARN) of the role to assume.
    secretKeySecretSecretKeySelectorSecretKeySecret is the secret selector to the bucket's secret key
    useSDKCredsbooleanUseSDKCreds tells the driver to figure out credentials based on sdk defaults.
    -

    ValueFrom

    -

    ValueFrom describes a location in which to obtain the value to a parameter

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    configMapKeyRefConfigMapKeySelectorConfigMapKeyRef is configmap selector for input parameter configuration
    defaultstringDefault specifies a value to be used if retrieving the value from the specified source fails
    eventstringSelector (https://github.com/antonmedv/expr) that is evaluated against the event to get the value of the parameter. E.g. payload.message
    expressionstringExpression, if defined, is evaluated to specify the value for the parameter
    jqFilterstringJQFilter expression against the resource object in resource templates
    jsonPathstringJSONPath of a resource to retrieve an output parameter value from in resource templates
    parameterstringParameter reference to a step or dag task in which to retrieve an output parameter value from (e.g. '{{steps.mystep.outputs.myparam}}')
    pathstringPath in the container to retrieve an output parameter value from in container templates
    suppliedSuppliedValueFromSupplied value to be filled in directly, either through the CLI, API, etc.
    -

    Counter

    -

    Counter is a Counter prometheus metric

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    valuestringValue is the value of the metric
    -

    Gauge

    -

    Gauge is a Gauge prometheus metric

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    operationstringOperation defines the operation to apply with value and the metrics' current value
    realtimebooleanRealtime emits this metric in real time if applicable
    valuestringValue is the value to be used in the operation with the metric's current value. If no operation is set, value is the value of the metric
    -

    Histogram

    -

    Histogram is a Histogram prometheus metric

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    bucketsArray<Amount>Buckets is a list of bucket divisors for the histogram
    valuestringValue is the value of the metric
    -

    MetricLabel

    -

    MetricLabel is a single label for a prometheus metric

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    keystringNo description available
    valuestringNo description available
    -

    RetryNodeAntiAffinity

    -

    RetryNodeAntiAffinity is a placeholder for future expansion, only empty nodeAntiAffinity is allowed. In order to prevent running steps on the same host, it uses "kubernetes.io/hostname".

    -

    ContainerNode

    -

    No description available

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    argsArray< string >Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
    commandArray< string >Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
    dependenciesArray< string >No description available
    envArray<EnvVar>List of environment variables to set in the container. Cannot be updated.
    envFromArray<EnvFromSource>List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.
    imagestringContainer image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.
    imagePullPolicystringImage pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
    lifecycleLifecycleActions that the management system should take in response to container lifecycle events. Cannot be updated.
    livenessProbeProbePeriodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
    namestringName of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
    portsArray<ContainerPort>List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
    readinessProbeProbePeriodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
    resourcesResourceRequirementsCompute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    securityContextSecurityContextSecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
    startupProbeProbeStartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
    stdinbooleanWhether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.
    stdinOncebooleanWhether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false
    terminationMessagePathstringOptional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.
    terminationMessagePolicystringIndicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.
    ttybooleanWhether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.
    volumeDevicesArray<VolumeDevice>volumeDevices is the list of block devices to be used by the container.
    volumeMountsArray<VolumeMount>Pod volumes to mount into the container's filesystem. Cannot be updated.
    workingDirstringContainer's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.
    -

    ContainerSetRetryStrategy

    -

    No description available

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    durationstringDuration is the time between each retry, examples values are "300ms", "1s" or "5m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
    retriesIntOrStringNumber of retries
    -

    DAGTask

    -

    DAGTask represents a node in the graph during DAG execution

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    argumentsArgumentsArguments are the parameter and artifact arguments to the template
    continueOnContinueOnContinueOn makes argo proceed with the following step even if this step fails. Errors and Failed states can be specified
    dependenciesArray< string >Dependencies are name of other targets which this depends on
    dependsstringDepends are name of other targets which this depends on
    hooksLifecycleHookHooks hold the lifecycle hook which is invoked at lifecycle of task, irrespective of the success, failure, or error status of the primary task
    inlineTemplateInline is the template. Template must be empty if this is declared (and vice-versa).
    namestringName is the name of the target
    ~~onExit~~~~string~~~~OnExit is a template reference which is invoked at the end of the template, irrespective of the success, failure, or error of the primary template.~~ DEPRECATED: Use Hooks[exit].Template instead.
    templatestringName of template to execute
    templateRefTemplateRefTemplateRef is the reference to the template resource to execute.
    whenstringWhen is an expression in which the task should conditionally execute
    withItemsArray<Item>WithItems expands a task into multiple parallel tasks from the items in the list
    withParamstringWithParam expands a task into multiple parallel tasks from the value in the parameter, which is expected to be a JSON list.
    withSequenceSequenceWithSequence expands a task into a numeric sequence
    -

    DataSource

    -

    DataSource sources external data into a data template

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    artifactPathsArtifactPathsArtifactPaths is a data transformation that collects a list of artifact paths
    -

    TransformationStep

    -

    No description available

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    expressionstringExpression defines an expr expression to apply
    -

    HTTPBodySource

    -

    HTTPBodySource contains the source of the HTTP body.

    -

    Fields

    - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    bytesbyteNo description available
    -

    HTTPHeader

    -

    No description available

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    namestringNo description available
    valuestringNo description available
    valueFromHTTPHeaderSourceNo description available
    -

    Cache

    -

    Cache is the configuration for the type of cache to be used

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    configMapConfigMapKeySelectorConfigMap sets a ConfigMap-based cache
    -

    ManifestFrom

    -

    No description available

    -

    Fields

    - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    artifactArtifactArtifact contains the artifact to use
    -

    ContinueOn

    -

    ContinueOn defines if a workflow should continue even if a task or step fails/errors. It can be specified if the workflow should continue when the pod errors, fails or both.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    errorbooleanNo description available
    failedbooleanNo description available
    -

    Item

    -

    Item expands a single workflow step into multiple parallel steps The value of Item can be a map, string, bool, or number

    -
    -Examples with this field (click to open) -
    - -
    - -

    Sequence

    -

    Sequence expands a workflow step into a numeric range

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    countIntOrStringCount is number of elements in the sequence (default: 0). Not to be used with end
    endIntOrStringNumber at which to end the sequence (default: 0). Not to be used with Count
    formatstringFormat is a printf format string to format the value in the sequence
    startIntOrStringNumber at which to start the sequence (default: 0)
    -

    ArtifactoryArtifactRepository

    -

    ArtifactoryArtifactRepository defines the controller configuration for an artifactory artifact repository

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    keyFormatstringKeyFormat defines the format of how to store keys and can reference workflow variables.
    passwordSecretSecretKeySelectorPasswordSecret is the secret selector to the repository password
    repoURLstringRepoURL is the url for artifactory repo.
    usernameSecretSecretKeySelectorUsernameSecret is the secret selector to the repository username
    -

    AzureArtifactRepository

    -

    AzureArtifactRepository defines the controller configuration for an Azure Blob Storage artifact repository

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    accountKeySecretSecretKeySelectorAccountKeySecret is the secret selector to the Azure Blob Storage account access key
    blobNameFormatstringBlobNameFormat defines the format of how to store blob names. Can reference workflow variables
    containerstringContainer is the container where resources will be stored
    endpointstringEndpoint is the service url associated with an account. It is most likely "https://<account_name>.blob.core.windows.net"
    useSDKCredsbooleanUseSDKCreds tells the driver to figure out credentials based on sdk defaults.
    -

    GCSArtifactRepository

    -

    GCSArtifactRepository defines the controller configuration for a GCS artifact repository

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    bucketstringBucket is the name of the bucket
    keyFormatstringKeyFormat defines the format of how to store keys and can reference workflow variables.
    serviceAccountKeySecretSecretKeySelectorServiceAccountKeySecret is the secret selector to the bucket's service account key
    -

    HDFSArtifactRepository

    -

    HDFSArtifactRepository defines the controller configuration for an HDFS artifact repository

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    addressesArray< string >Addresses is accessible addresses of HDFS name nodes
    forcebooleanForce copies a file forcibly even if it exists
    hdfsUserstringHDFSUser is the user to access HDFS file system. It is ignored if either ccache or keytab is used.
    krbCCacheSecretSecretKeySelectorKrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.
    krbConfigConfigMapConfigMapKeySelectorKrbConfig is the configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.
    krbKeytabSecretSecretKeySelectorKrbKeytabSecret is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.
    krbRealmstringKrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.
    krbServicePrincipalNamestringKrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.
    krbUsernamestringKrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.
    pathFormatstringPathFormat defines the format of the path to store a file. Can reference workflow variables
    -

    OSSArtifactRepository

    -

    OSSArtifactRepository defines the controller configuration for an OSS artifact repository

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    accessKeySecretSecretKeySelectorAccessKeySecret is the secret selector to the bucket's access key
    bucketstringBucket is the name of the bucket
    createBucketIfNotPresentbooleanCreateBucketIfNotPresent tells the driver to attempt to create the OSS bucket for output artifacts, if it doesn't exist
    endpointstringEndpoint is the hostname of the bucket endpoint
    keyFormatstringKeyFormat defines the format of how to store keys and can reference workflow variables.
    lifecycleRuleOSSLifecycleRuleLifecycleRule specifies how to manage bucket's lifecycle
    secretKeySecretSecretKeySelectorSecretKeySecret is the secret selector to the bucket's secret key
    securityTokenstringSecurityToken is the user's temporary security token. For more details, check out: https://www.alibabacloud.com/help/doc-detail/100624.htm
    useSDKCredsbooleanUseSDKCreds tells the driver to figure out credentials based on sdk defaults.
    -

    S3ArtifactRepository

    -

    S3ArtifactRepository defines the controller configuration for an S3 artifact repository

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    accessKeySecretSecretKeySelectorAccessKeySecret is the secret selector to the bucket's access key
    bucketstringBucket is the name of the bucket
    caSecretSecretKeySelectorCASecret specifies the secret that contains the CA, used to verify the TLS connection
    createBucketIfNotPresentCreateS3BucketOptionsCreateBucketIfNotPresent tells the driver to attempt to create the S3 bucket for output artifacts, if it doesn't exist. Setting Enabled Encryption will apply either SSE-S3 to the bucket if KmsKeyId is not set or SSE-KMS if it is.
    encryptionOptionsS3EncryptionOptionsNo description available
    endpointstringEndpoint is the hostname of the bucket endpoint
    insecurebooleanInsecure will connect to the service with TLS
    keyFormatstringKeyFormat defines the format of how to store keys and can reference workflow variables.
    ~~keyPrefix~~~~string~~~~KeyPrefix is prefix used as part of the bucket key in which the controller will store artifacts.~~ DEPRECATED. Use KeyFormat instead
    regionstringRegion contains the optional bucket region
    roleARNstringRoleARN is the Amazon Resource Name (ARN) of the role to assume.
    secretKeySecretSecretKeySelectorSecretKeySecret is the secret selector to the bucket's secret key
    useSDKCredsbooleanUseSDKCreds tells the driver to figure out credentials based on sdk defaults.
    -

    MutexHolding

    -

    MutexHolding describes the mutex and the object which is holding it.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    holderstringHolder is a reference to the object which holds the Mutex. Holding Scenario: 1. Current workflow's NodeID which is holding the lock. e.g: ${NodeID} Waiting Scenario: 1. Current workflow or other workflow NodeID which is holding the lock. e.g: ${WorkflowName}/${NodeID}
    mutexstringReference for the mutex e.g: ${namespace}/mutex/${mutexName}
    -

    SemaphoreHolding

    -

    No description available

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    holdersArray< string >Holders stores the list of current holder names in the io.argoproj.workflow.v1alpha1.
    semaphorestringSemaphore stores the semaphore name.
    -

    NoneStrategy

    -

    NoneStrategy indicates to skip tar process and upload the files or directory tree as independent files. Note that if the artifact is a directory, the artifact driver must support the ability to save/load the directory appropriately.

    -
    -Examples with this field (click to open) -
    - -
    - -

    TarStrategy

    -

    TarStrategy will tar and gzip the file or directory when saving

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    compressionLevelintegerCompressionLevel specifies the gzip compression level to use for the artifact. Defaults to gzip.DefaultCompression.
    -

    ZipStrategy

    -

    ZipStrategy will unzip zipped input artifacts

    -

    HTTPAuth

    -

    No description available

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    basicAuthBasicAuthNo description available
    clientCertClientCertAuthNo description available
    oauth2OAuth2AuthNo description available
    - -

    Header indicates a key-value request header to be used when fetching artifacts over HTTP

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    namestringName is the header name
    valuestringValue is the literal value to use for the header
    -

    OSSLifecycleRule

    -

    OSSLifecycleRule specifies how to manage bucket's lifecycle

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    markDeletionAfterDaysintegerMarkDeletionAfterDays is the number of days before we delete objects in the bucket
    markInfrequentAccessAfterDaysintegerMarkInfrequentAccessAfterDays is the number of days before we convert the objects in the bucket to Infrequent Access (IA) storage type
    -

    CreateS3BucketOptions

    -

    CreateS3BucketOptions options used to determine the automatic bucket-creation process

    -

    Fields

    - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    objectLockingbooleanObjectLocking Enable object locking
    -

    S3EncryptionOptions

    -

    S3EncryptionOptions used to determine encryption options during s3 operations

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    enableEncryptionbooleanEnableEncryption tells the driver to encrypt objects if set to true. If kmsKeyId and serverSideCustomerKeySecret are not set, SSE-S3 will be used
    kmsEncryptionContextstringKmsEncryptionContext is a json blob that contains an encryption context. See https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context for more information
    kmsKeyIdstringKMSKeyId tells the driver to encrypt the object using the specified KMS Key.
    serverSideCustomerKeySecretSecretKeySelectorServerSideCustomerKeySecret tells the driver to encrypt the output artifacts using SSE-C with the specified secret.
    -

    SuppliedValueFrom

    -

    SuppliedValueFrom is a placeholder for a value to be filled in directly, either through the CLI, API, etc.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Amount

    -

    Amount represent a numeric amount.

    -
    -Examples with this field (click to open) -
    - -
    - -

    ArtifactPaths

    -

    ArtifactPaths expands a step from a collection of artifacts

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    archiveArchiveStrategyArchive controls how the artifact will be saved to the artifact repository.
    archiveLogsbooleanArchiveLogs indicates if the container logs should be archived
    artifactGCArtifactGCArtifactGC describes the strategy to use when to deleting an artifact from completed or deleted workflows
    artifactoryArtifactoryArtifactArtifactory contains artifactory artifact location details
    azureAzureArtifactAzure contains Azure Storage artifact location details
    deletedbooleanHas this been deleted?
    fromstringFrom allows an artifact to reference an artifact from a previous step
    fromExpressionstringFromExpression, if defined, is evaluated to specify the value for the artifact
    gcsGCSArtifactGCS contains GCS artifact location details
    gitGitArtifactGit contains git artifact location details
    globalNamestringGlobalName exports an output artifact to the global scope, making it available as '{{io.argoproj.workflow.v1alpha1.outputs.artifacts.XXXX}}' and in workflow.status.outputs.artifacts
    hdfsHDFSArtifactHDFS contains HDFS artifact location details
    httpHTTPArtifactHTTP contains HTTP artifact location details
    modeintegermode bits to use on this file, must be a value between 0 and 0777 set when loading input artifacts.
    namestringname of the artifact. must be unique within a template's inputs/outputs.
    optionalbooleanMake Artifacts optional, if Artifacts doesn't generate or exist
    ossOSSArtifactOSS contains OSS artifact location details
    pathstringPath is the container path to the artifact
    rawRawArtifactRaw contains raw artifact location details
    recurseModebooleanIf mode is set, apply the permission recursively into the artifact if it is a folder
    s3S3ArtifactS3 contains S3 artifact location details
    subPathstringSubPath allows an artifact to be sourced from a subpath within the specified source
    -

    HTTPHeaderSource

    -

    No description available

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    secretKeyRefSecretKeySelectorNo description available
    -

    BasicAuth

    -

    BasicAuth describes the secret selectors required for basic authentication

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    passwordSecretSecretKeySelectorPasswordSecret is the secret selector to the repository password
    usernameSecretSecretKeySelectorUsernameSecret is the secret selector to the repository username
    -

    ClientCertAuth

    -

    ClientCertAuth holds necessary information for client authentication via certificates

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    clientCertSecretSecretKeySelectorNo description available
    clientKeySecretSecretKeySelectorNo description available
    -

    OAuth2Auth

    -

    OAuth2Auth holds all information for client authentication via OAuth2 tokens

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    clientIDSecretSecretKeySelectorNo description available
    clientSecretSecretSecretKeySelectorNo description available
    endpointParamsArray<OAuth2EndpointParam>No description available
    scopesArray< string >No description available
    tokenURLSecretSecretKeySelectorNo description available
    -

    OAuth2EndpointParam

    -

    EndpointParam is for requesting optional fields that should be sent in the oauth request

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    keystringName is the header name
    valuestringValue is the literal value to use for the header
    -

    External Fields

    -

    ObjectMeta

    -

    ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    annotationsMap< string , string >Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations
    clusterNamestringThe name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.
    creationTimestampTimeCreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    deletionGracePeriodSecondsintegerNumber of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.
    deletionTimestampTimeDeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    finalizersArray< string >Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.
    generateNamestringGenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency
    generationintegerA sequence number representing a specific generation of the desired state. Populated by the system. Read-only.
    labelsMap< string , string >Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels
    managedFieldsArray<ManagedFieldsEntry>ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like "ci-cd". The set of fields is always in the version that the workflow used when modifying the object.
    namestringName must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names
    namespacestringNamespace defines the space within which each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces
    ownerReferencesArray<OwnerReference>List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.
    resourceVersionstringAn opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients and passed unmodified back to the server. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
    ~~selfLink~~~~string~~~~SelfLink is a URL representing this object. Populated by the system. Read-only.~~ DEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.
    uidstringUID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids
    -

    Affinity

    -

    Affinity is a group of affinity scheduling rules.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    nodeAffinityNodeAffinityDescribes node affinity scheduling rules for the pod.
    podAffinityPodAffinityDescribes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
    podAntiAffinityPodAntiAffinityDescribes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
    -

    PodDNSConfig

    -

    PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    nameserversArray< string >A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.
    optionsArray<PodDNSConfigOption>A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.
    searchesArray< string >A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.
    -

    HostAlias

    -

    HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    hostnamesArray< string >Hostnames for the above IP address.
    ipstringIP address of the host file entry.
    -

    LocalObjectReference

    -

    LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
    -

    PodDisruptionBudgetSpec

    -

    PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    maxUnavailableIntOrStringAn eviction is allowed if at most "maxUnavailable" pods selected by "selector" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with "minAvailable".
    minAvailableIntOrStringAn eviction is allowed if at least "minAvailable" pods selected by "selector" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying "100%".
    selectorLabelSelectorLabel query over pods whose evictions are managed by the disruption budget. A null selector will match no pods, while an empty ({}) selector will select all pods within the namespace.
    -

    PodSecurityContext

    -

    PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    fsGroupintegerA special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- If unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.
    fsGroupChangePolicystringfsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. Note that this field cannot be set when spec.os.name is windows.
    runAsGroupintegerThe GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.
    runAsNonRootbooleanIndicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
    runAsUserintegerThe UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.
    seLinuxOptionsSELinuxOptionsThe SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.
    seccompProfileSeccompProfileThe seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.
    supplementalGroupsArray< integer >A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows.
    sysctlsArray<Sysctl>Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.
    windowsOptionsWindowsSecurityContextOptionsThe Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.
    -

    Toleration

    -

    The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    effectstringEffect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. Possible enum values: - "NoExecute" Evict any already-running pods that do not tolerate the taint. Currently enforced by NodeController. - "NoSchedule" Do not allow new pods to schedule onto the node unless they tolerate the taint, but allow all pods submitted to Kubelet without going through the scheduler to start, and allow all already-running pods to continue running. Enforced by the scheduler. - "PreferNoSchedule" Like TaintEffectNoSchedule, but the scheduler tries not to schedule new pods onto the node, rather than prohibiting new pods from scheduling onto the node entirely. Enforced by the scheduler.
    keystringKey is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
    operatorstringOperator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. Possible enum values: - "Equal" - "Exists"
    tolerationSecondsintegerTolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
    valuestringValue is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
    -

    PersistentVolumeClaim

    -

    PersistentVolumeClaim is a user's request for and claim to a persistent volume

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    apiVersionstringAPIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
    kindstringKind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
    metadataObjectMetaStandard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    specPersistentVolumeClaimSpecSpec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
    statusPersistentVolumeClaimStatusStatus represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
    -

    Volume

    -

    Volume represents a named volume in a pod that may be accessed by any container in the pod.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    awsElasticBlockStoreAWSElasticBlockStoreVolumeSourceAWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
    azureDiskAzureDiskVolumeSourceAzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
    azureFileAzureFileVolumeSourceAzureFile represents an Azure File Service mount on the host and bind mount to the pod.
    cephfsCephFSVolumeSourceCephFS represents a Ceph FS mount on the host that shares a pod's lifetime
    cinderCinderVolumeSourceCinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md
    configMapConfigMapVolumeSourceConfigMap represents a configMap that should populate this volume
    csiCSIVolumeSourceCSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).
    downwardAPIDownwardAPIVolumeSourceDownwardAPI represents downward API about the pod that should populate this volume
    emptyDirEmptyDirVolumeSourceEmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
    ephemeralEphemeralVolumeSourceEphemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity tracking are needed, c) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource for more information on the connection between this volume type and PersistentVolumeClaim). Use PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod. Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information. A pod can use both types of ephemeral volumes and persistent volumes at the same time.
    fcFCVolumeSourceFC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
    flexVolumeFlexVolumeSourceFlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.
    flockerFlockerVolumeSourceFlocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running
    gcePersistentDiskGCEPersistentDiskVolumeSourceGCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
    ~~gitRepo~~~~GitRepoVolumeSource~~~~GitRepo represents a git repository at a particular revision.~~ DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.
    glusterfsGlusterfsVolumeSourceGlusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md
    hostPathHostPathVolumeSourceHostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
    iscsiISCSIVolumeSourceISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md
    namestringVolume's name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
    nfsNFSVolumeSourceNFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
    persistentVolumeClaimPersistentVolumeClaimVolumeSourcePersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
    photonPersistentDiskPhotonPersistentDiskVolumeSourcePhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
    portworxVolumePortworxVolumeSourcePortworxVolume represents a portworx volume attached and mounted on kubelets host machine
    projectedProjectedVolumeSourceItems for all in one resources secrets, configmaps, and downward API
    quobyteQuobyteVolumeSourceQuobyte represents a Quobyte mount on the host that shares a pod's lifetime
    rbdRBDVolumeSourceRBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md
    scaleIOScaleIOVolumeSourceScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
    secretSecretVolumeSourceSecret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
    storageosStorageOSVolumeSourceStorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
    vsphereVolumeVsphereVirtualDiskVolumeSourceVsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
    -

    Time

    -

    Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.

    -

    ObjectReference

    -

    ObjectReference contains enough information to let you inspect or modify the referred object.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    apiVersionstringAPI version of the referent.
    fieldPathstringIf referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.
    kindstringKind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
    namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
    namespacestringNamespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
    resourceVersionstringSpecific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
    uidstringUID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
    -

    Duration

    -

    Duration is a wrapper around time.Duration which supports correct marshaling to YAML and JSON. In particular, it marshals into strings, which can be used as map keys in json.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    durationstringNo description available
    -

    LabelSelector

    -

    A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    matchExpressionsArray<LabelSelectorRequirement>matchExpressions is a list of label selector requirements. The requirements are ANDed.
    matchLabelsMap< string , string >matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
    -

    IntOrString

    -

    IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Container

    -

    A single application container that you want to run within a pod.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    argsArray< string >Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
    commandArray< string >Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
    envArray<EnvVar>List of environment variables to set in the container. Cannot be updated.
    envFromArray<EnvFromSource>List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.
    imagestringDocker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.
    imagePullPolicystringImage pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images Possible enum values: - "Always" means that kubelet always attempts to pull the latest image. Container will fail If the pull fails. - "IfNotPresent" means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails. - "Never" means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present
    lifecycleLifecycleActions that the management system should take in response to container lifecycle events. Cannot be updated.
    livenessProbeProbePeriodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
    namestringName of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
    portsArray<ContainerPort>List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
    readinessProbeProbePeriodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
    resourcesResourceRequirementsCompute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    securityContextSecurityContextSecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
    startupProbeProbeStartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
    stdinbooleanWhether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.
    stdinOncebooleanWhether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false
    terminationMessagePathstringOptional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.
    terminationMessagePolicystringIndicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. Possible enum values: - "FallbackToLogsOnError" will read the most recent contents of the container logs for the container status message when the container exits with an error and the terminationMessagePath has no contents. - "File" is the default behavior and will set the container status message to the contents of the container's terminationMessagePath when the container exits.
    ttybooleanWhether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.
    volumeDevicesArray<VolumeDevice>volumeDevices is the list of block devices to be used by the container.
    volumeMountsArray<VolumeMount>Pod volumes to mount into the container's filesystem. Cannot be updated.
    workingDirstringContainer's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.
    -

    ConfigMapKeySelector

    -

    Selects a key from a ConfigMap.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    keystringThe key to select.
    namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
    optionalbooleanSpecify whether the ConfigMap or its key must be defined
    -

    VolumeMount

    -

    VolumeMount describes a mounting of a Volume within a container.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    mountPathstringPath within the container at which the volume should be mounted. Must not contain ':'.
    mountPropagationstringmountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
    namestringThis must match the Name of a Volume.
    readOnlybooleanMounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
    subPathstringPath within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
    subPathExprstringExpanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive.
    -

    EnvVar

    -

    EnvVar represents an environment variable present in a Container.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    namestringName of the environment variable. Must be a C_IDENTIFIER.
    valuestringVariable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".
    valueFromEnvVarSourceSource for the environment variable's value. Cannot be used if value is not empty.
    -

    EnvFromSource

    -

    EnvFromSource represents the source of a set of ConfigMaps

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    configMapRefConfigMapEnvSourceThe ConfigMap to select from
    prefixstringAn optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
    secretRefSecretEnvSourceThe Secret to select from
    -

    Lifecycle

    -

    Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    postStartLifecycleHandlerPostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
    preStopLifecycleHandlerPreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
    -

    Probe

    -

    Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    execExecActionExec specifies the action to take.
    failureThresholdintegerMinimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
    grpcGRPCActionGRPC specifies an action involving a GRPC port. This is an alpha field and requires enabling GRPCContainerProbe feature gate.
    httpGetHTTPGetActionHTTPGet specifies the http request to perform.
    initialDelaySecondsintegerNumber of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
    periodSecondsintegerHow often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
    successThresholdintegerMinimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
    tcpSocketTCPSocketActionTCPSocket specifies an action involving a TCP port.
    terminationGracePeriodSecondsintegerOptional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
    timeoutSecondsintegerNumber of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
    -

    ContainerPort

    -

    ContainerPort represents a network port in a single container.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    containerPortintegerNumber of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.
    hostIPstringWhat host IP to bind the external port to.
    hostPortintegerNumber of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.
    namestringIf specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.
    protocolstringProtocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". Possible enum values: - "SCTP" is the SCTP protocol. - "TCP" is the TCP protocol. - "UDP" is the UDP protocol.
    -

    ResourceRequirements

    -

    ResourceRequirements describes the compute resource requirements.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    limitsQuantityLimits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    requestsQuantityRequests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    -

    SecurityContext

    -

    SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    allowPrivilegeEscalationbooleanAllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.
    capabilitiesCapabilitiesThe capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows.
    privilegedbooleanRun container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows.
    procMountstringprocMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.
    readOnlyRootFilesystembooleanWhether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.
    runAsGroupintegerThe GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.
    runAsNonRootbooleanIndicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
    runAsUserintegerThe UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.
    seLinuxOptionsSELinuxOptionsThe SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.
    seccompProfileSeccompProfileThe seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.
    windowsOptionsWindowsSecurityContextOptionsThe Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.
    -

    VolumeDevice

    -

    volumeDevice describes a mapping of a raw block device within a container.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    devicePathstringdevicePath is the path inside of the container that the device will be mapped to.
    namestringname must match the name of a persistentVolumeClaim in the pod
    -

    SecretKeySelector

    -

    SecretKeySelector selects a key of a Secret.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    keystringThe key of the secret to select from. Must be a valid secret key.
    namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
    optionalbooleanSpecify whether the Secret or its key must be defined
    -

    ManagedFieldsEntry

    -

    ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    apiVersionstringAPIVersion defines the version of this resource that this field set applies to. The format is "group/version" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.
    fieldsTypestringFieldsType is the discriminator for the different fields format and version. There is currently only one possible value: "FieldsV1"
    fieldsV1FieldsV1FieldsV1 holds the first JSON version format as described in the "FieldsV1" type.
    managerstringManager is an identifier of the workflow managing these fields.
    operationstringOperation is the type of operation which led to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.
    subresourcestringSubresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource.
    timeTimeTime is the timestamp of when these fields were set. It should always be empty if Operation is 'Apply'.
    -

    OwnerReference

    -

    OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    apiVersionstringAPI version of the referent.
    blockOwnerDeletionbooleanIf true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.
    controllerbooleanIf true, this reference points to the managing controller.
    kindstringKind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
    namestringName of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names
    uidstringUID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids
    -

    NodeAffinity

    -

    Node affinity is a group of node affinity scheduling rules.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    preferredDuringSchedulingIgnoredDuringExecutionArray<PreferredSchedulingTerm>The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
    requiredDuringSchedulingIgnoredDuringExecutionNodeSelectorIf the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
    -

    PodAffinity

    -

    Pod affinity is a group of inter pod affinity scheduling rules.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    preferredDuringSchedulingIgnoredDuringExecutionArray<WeightedPodAffinityTerm>The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
    requiredDuringSchedulingIgnoredDuringExecutionArray<PodAffinityTerm>If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
    -

    PodAntiAffinity

    -

    Pod anti affinity is a group of inter pod anti affinity scheduling rules.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    preferredDuringSchedulingIgnoredDuringExecutionArray<WeightedPodAffinityTerm>The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
    requiredDuringSchedulingIgnoredDuringExecutionArray<PodAffinityTerm>If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
    -

    PodDNSConfigOption

    -

    PodDNSConfigOption defines DNS resolver options of a pod.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    namestringRequired.
    valuestringNo description available
    -

    SELinuxOptions

    -

    SELinuxOptions are the labels to be applied to the container

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    levelstringLevel is SELinux level label that applies to the container.
    rolestringRole is a SELinux role label that applies to the container.
    typestringType is a SELinux type label that applies to the container.
    userstringUser is a SELinux user label that applies to the container.
    -

    SeccompProfile

    -

    SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    localhostProfilestringlocalhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost".
    typestringtype indicates which kind of seccomp profile will be applied. Valid options are: Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. Possible enum values: - "Localhost" indicates a profile defined in a file on the node should be used. The file's location relative to /seccomp. - "RuntimeDefault" represents the default container runtime seccomp profile. - "Unconfined" indicates no seccomp profile is applied (A.K.A. unconfined).
    -

    Sysctl

    -

    Sysctl defines a kernel parameter to be set

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    namestringName of a property to set
    valuestringValue of a property to set
    -

    WindowsSecurityContextOptions

    -

    WindowsSecurityContextOptions contain Windows-specific options and credentials.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    gmsaCredentialSpecstringGMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.
    gmsaCredentialSpecNamestringGMSACredentialSpecName is the name of the GMSA credential spec to use.
    hostProcessbooleanHostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.
    runAsUserNamestringThe UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
    -

    PersistentVolumeClaimSpec

    -

    PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    accessModesArray< string >AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
    dataSourceTypedLocalObjectReferenceThis field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.
    dataSourceRefTypedLocalObjectReferenceSpecifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
    resourcesResourceRequirementsResources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
    selectorLabelSelectorA label query over volumes to consider for binding.
    storageClassNamestringName of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
    volumeModestringvolumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.
    volumeNamestringVolumeName is the binding reference to the PersistentVolume backing this claim.
    -

    PersistentVolumeClaimStatus

    -

    PersistentVolumeClaimStatus is the current status of a persistent volume claim.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    accessModesArray< string >AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
    allocatedResourcesQuantityThe storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
    capacityQuantityRepresents the actual resources of the underlying volume.
    conditionsArray<PersistentVolumeClaimCondition>Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.
    phasestringPhase represents the current phase of PersistentVolumeClaim. Possible enum values: - "Bound" used for PersistentVolumeClaims that are bound - "Lost" used for PersistentVolumeClaims that lost their underlying PersistentVolume. The claim was bound to a PersistentVolume and this volume does not exist any longer and all data on it was lost. - "Pending" used for PersistentVolumeClaims that are not yet bound
    resizeStatusstringResizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
    -

    AWSElasticBlockStoreVolumeSource

    -

    Represents a Persistent Disk resource in AWS. An AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    fsTypestringFilesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
    partitionintegerThe partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
    readOnlybooleanSpecify "true" to force and set the ReadOnly property in VolumeMounts to "true". If omitted, the default is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
    volumeIDstringUnique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
    -

    AzureDiskVolumeSource

    -

    AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    cachingModestringHost Caching mode: None, Read Only, Read Write.
    diskNamestringThe Name of the data disk in the blob storage
    diskURIstringThe URI of the data disk in the blob storage
    fsTypestringFilesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
    kindstringExpected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared
    readOnlybooleanDefaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
    -

    AzureFileVolumeSource

    -

    AzureFile represents an Azure File Service mount on the host and bind mount to the pod.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    readOnlybooleanDefaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
    secretNamestringthe name of secret that contains Azure Storage Account Name and Key
    shareNamestringShare Name
    -

    CephFSVolumeSource

    -

    Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    monitorsArray< string >Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
    pathstringOptional: Used as the mounted root, rather than the full Ceph tree, default is /
    readOnlybooleanOptional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
    secretFilestringOptional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
    secretRefLocalObjectReferenceOptional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
    userstringOptional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
    -

    CinderVolumeSource

    -

    Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    fsTypestringFilesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md
    readOnlybooleanOptional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md
    secretRefLocalObjectReferenceOptional: points to a secret object containing parameters used to connect to OpenStack.
    volumeIDstringvolume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md
    -

    ConfigMapVolumeSource

    -

    Adapts a ConfigMap into a volume. The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    defaultModeintegerOptional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
    itemsArray<KeyToPath>If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.
    namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
    optionalbooleanSpecify whether the ConfigMap or its keys must be defined
    -

    CSIVolumeSource

    -

    Represents a source location of a volume to mount, managed by an external CSI driver

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    driverstringDriver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.
    fsTypestringFilesystem type to mount. Ex. "ext4", "xfs", "ntfs". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.
    nodePublishSecretRefLocalObjectReferenceNodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.
    readOnlybooleanSpecifies a read-only configuration for the volume. Defaults to false (read/write).
    volumeAttributesMap< string , string >VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.
    -

    DownwardAPIVolumeSource

    -

    DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    defaultModeintegerOptional: mode bits to use on created files by default. Must be a Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
    itemsArray<DownwardAPIVolumeFile>Items is a list of downward API volume file
    -

    EmptyDirVolumeSource

    -

    Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    mediumstringWhat type of storage medium should back this directory. The default is "" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
    sizeLimitQuantityTotal amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir
    -

    EphemeralVolumeSource

    -

    Represents an ephemeral volume that is handled by a normal storage driver.

    -

    Fields

    - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    volumeClaimTemplatePersistentVolumeClaimTemplateWill be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be <pod name>-<volume name> where <volume name> is the name from the PodSpec.Volumes array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). An existing PVC with that name that is not owned by the pod will not be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to be updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster. This field is read-only and no changes will be made by Kubernetes to the PVC after it has been created. Required, must not be nil.
    -

    FCVolumeSource

    -

    Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    fsTypestringFilesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
    lunintegerOptional: FC target lun number
    readOnlybooleanOptional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
    targetWWNsArray< string >Optional: FC target worldwide names (WWNs)
    wwidsArray< string >Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
    -

    FlexVolumeSource

    -

    FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    driverstringDriver is the name of the driver to use for this volume.
    fsTypestringFilesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
    optionsMap< string , string >Optional: Extra command options if any.
    readOnlybooleanOptional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
    secretRefLocalObjectReferenceOptional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.
    -

    FlockerVolumeSource

    -

    Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    datasetNamestringName of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated
    datasetUUIDstringUUID of the dataset. This is unique identifier of a Flocker dataset
    -

    GCEPersistentDiskVolumeSource

    -

    Represents a Persistent Disk resource in Google Compute Engine. A GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    fsTypestringFilesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
    partitionintegerThe partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
    pdNamestringUnique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
    readOnlybooleanReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
    -

    GitRepoVolumeSource

    -

    Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    directorystringTarget directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.
    repositorystringRepository URL
    revisionstringCommit hash for the specified revision.
    -

    GlusterfsVolumeSource

    -

    Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    endpointsstringEndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
    pathstringPath is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
    readOnlybooleanReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
    -

    HostPathVolumeSource

    -

    Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    pathstringPath of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
    typestringType for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
    -

    ISCSIVolumeSource

    -

    Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    chapAuthDiscoverybooleanwhether support iSCSI Discovery CHAP authentication
    chapAuthSessionbooleanwhether support iSCSI Session CHAP authentication
    fsTypestringFilesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
    initiatorNamestringCustom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.
    iqnstringTarget iSCSI Qualified Name.
    iscsiInterfacestringiSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).
    lunintegeriSCSI Target Lun number.
    portalsArray< string >iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).
    readOnlybooleanReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.
    secretRefLocalObjectReferenceCHAP Secret for iSCSI target and initiator authentication
    targetPortalstringiSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).
    -

    NFSVolumeSource

    -

    Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    pathstringPath that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
    readOnlybooleanReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
    serverstringServer is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
    -

    PersistentVolumeClaimVolumeSource

    -

    PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    claimNamestringClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
    readOnlybooleanWill force the ReadOnly setting in VolumeMounts. Default false.
    -

    PhotonPersistentDiskVolumeSource

    -

    Represents a Photon Controller persistent disk resource.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    fsTypestringFilesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
    pdIDstringID that identifies Photon Controller persistent disk
    -

    PortworxVolumeSource

    -

    PortworxVolumeSource represents a Portworx volume resource.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    fsTypestringFSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
    readOnlybooleanDefaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
    volumeIDstringVolumeID uniquely identifies a Portworx volume
    -

    ProjectedVolumeSource

    -

    Represents a projected volume source

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    defaultModeintegerMode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
    sourcesArray<VolumeProjection>list of volume projections
    -

    QuobyteVolumeSource

    -

    Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    groupstringGroup to map volume access to Default is no group
    readOnlybooleanReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.
    registrystringRegistry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes
    tenantstringTenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin
    userstringUser to map volume access to Defaults to serviceaccount user
    volumestringVolume is a string that references an already created Quobyte volume by name.
    -

    RBDVolumeSource

    -

    Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    fsTypestringFilesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
    imagestringThe rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
    keyringstringKeyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
    monitorsArray< string >A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
    poolstringThe rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
    readOnlybooleanReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
    secretRefLocalObjectReferenceSecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
    userstringThe rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
    -

    ScaleIOVolumeSource

    -

    ScaleIOVolumeSource represents a persistent ScaleIO volume

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    fsTypestringFilesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Default is "xfs".
    gatewaystringThe host address of the ScaleIO API Gateway.
    protectionDomainstringThe name of the ScaleIO Protection Domain for the configured storage.
    readOnlybooleanDefaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
    secretRefLocalObjectReferenceSecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.
    sslEnabledbooleanFlag to enable/disable SSL communication with Gateway, default false
    storageModestringIndicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.
    storagePoolstringThe ScaleIO Storage Pool associated with the protection domain.
    systemstringThe name of the storage system as configured in ScaleIO.
    volumeNamestringThe name of a volume already created in the ScaleIO system that is associated with this volume source.
    -

    SecretVolumeSource

    -

    Adapts a Secret into a volume. The contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    defaultModeintegerOptional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
    itemsArray<KeyToPath>If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.
    optionalbooleanSpecify whether the Secret or its keys must be defined
    secretNamestringName of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
    -

    StorageOSVolumeSource

    -

    Represents a StorageOS persistent volume resource.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    fsTypestringFilesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
    readOnlybooleanDefaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
    secretRefLocalObjectReferenceSecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.
    volumeNamestringVolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.
    volumeNamespacestringVolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to "default" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.
    -

    VsphereVirtualDiskVolumeSource

    -

    Represents a vSphere volume resource.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    fsTypestringFilesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
    storagePolicyIDstringStorage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
    storagePolicyNamestringStorage Policy Based Management (SPBM) profile name.
    volumePathstringPath that identifies vSphere volume vmdk
    -

    LabelSelectorRequirement

    -

    A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    keystringkey is the label key that the selector applies to.
    operatorstringoperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
    valuesArray< string >values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
    -

    EnvVarSource

    -

    EnvVarSource represents a source for the value of an EnvVar.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    configMapKeyRefConfigMapKeySelectorSelects a key of a ConfigMap.
    fieldRefObjectFieldSelectorSelects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels['<KEY>'], metadata.annotations['<KEY>'], spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
    resourceFieldRefResourceFieldSelectorSelects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
    secretKeyRefSecretKeySelectorSelects a key of a secret in the pod's namespace
    -

    ConfigMapEnvSource

    -

    ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
    optionalbooleanSpecify whether the ConfigMap must be defined
    -

    SecretEnvSource

    -

    SecretEnvSource selects a Secret to populate the environment variables with. The contents of the target Secret's Data field will represent the key-value pairs as environment variables.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
    optionalbooleanSpecify whether the Secret must be defined
    -

    LifecycleHandler

    -

    LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    execExecActionExec specifies the action to take.
    httpGetHTTPGetActionHTTPGet specifies the http request to perform.
    tcpSocketTCPSocketActionDeprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.
    -

    ExecAction

    -

    ExecAction describes a "run in container" action.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    commandArray< string >Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
    -

    GRPCAction

    -

    No description available

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    portintegerPort number of the gRPC service. Number must be in the range 1 to 65535.
    servicestringService is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). If this is not specified, the default behavior is defined by gRPC.
    -

    HTTPGetAction

    -

    HTTPGetAction describes an action based on HTTP Get requests.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    hoststringHost name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
    httpHeadersArray<HTTPHeader>Custom headers to set in the request. HTTP allows repeated headers.
    pathstringPath to access on the HTTP server.
    portIntOrStringName or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
    schemestringScheme to use for connecting to the host. Defaults to HTTP. Possible enum values: - "HTTP" means that the scheme used will be http:// - "HTTPS" means that the scheme used will be https://
    -

    TCPSocketAction

    -

    TCPSocketAction describes an action based on opening a socket

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    hoststringOptional: Host name to connect to, defaults to the pod IP.
    portIntOrStringNumber or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
    -

    Quantity

    -

    Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: ::= (Note that may be empty, from the "" case in .) ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= "+" | "-" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) ::= m | "" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) ::= "e" | "E" No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in "canonical form". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: a. No precision is lost b. No fractional digits will be emitted c. The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. Examples: 1.5 will be serialized as "1500m" 1.5Gi will be serialized as "1536Mi" Note that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Capabilities

    -

    Adds and removes POSIX capabilities from running containers.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    addArray< string >Added capabilities
    dropArray< string >Removed capabilities
    -

    FieldsV1

    -

    FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. Each key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:', where is the name of a field in a struct, or key in a map 'v:', where is the exact json formatted value of a list item 'i:', where is position of a item in a list 'k:', where is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set. The exact format is defined in sigs.k8s.io/structured-merge-diff

    -

    PreferredSchedulingTerm

    -

    An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    preferenceNodeSelectorTermA node selector term, associated with the corresponding weight.
    weightintegerWeight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
    -

    NodeSelector

    -

    A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.

    -

    Fields

    - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    nodeSelectorTermsArray<NodeSelectorTerm>Required. A list of node selector terms. The terms are ORed.
    -

    WeightedPodAffinityTerm

    -

    The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    podAffinityTermPodAffinityTermRequired. A pod affinity term, associated with the corresponding weight.
    weightintegerweight associated with matching the corresponding podAffinityTerm, in the range 1-100.
    -

    PodAffinityTerm

    -

    Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    labelSelectorLabelSelectorA label query over a set of resources, in this case pods.
    namespaceSelectorLabelSelectorA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
    namespacesArray< string >namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"
    topologyKeystringThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
    -

    TypedLocalObjectReference

    -

    TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    apiGroupstringAPIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
    kindstringKind is the type of resource being referenced
    namestringName is the name of resource being referenced
    -

    PersistentVolumeClaimCondition

    -

    PersistentVolumeClaimCondition contains details about state of pvc

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    lastProbeTimeTimeLast time we probed the condition.
    lastTransitionTimeTimeLast time the condition transitioned from one status to another.
    messagestringHuman-readable message indicating details about last transition.
    reasonstringUnique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized.
    statusstringNo description available
    typestringPossible enum values: - "FileSystemResizePending" - controller resize is finished and a file system resize is pending on node - "Resizing" - a user trigger resize of pvc has been started
    -

    KeyToPath

    -

    Maps a string key to a path within a volume.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    keystringThe key to project.
    modeintegerOptional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
    pathstringThe relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.
    -

    DownwardAPIVolumeFile

    -

    DownwardAPIVolumeFile represents information to create the file containing the pod field

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    fieldRefObjectFieldSelectorRequired: Selects a field of the pod: only annotations, labels, name and namespace are supported.
    modeintegerOptional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
    pathstringRequired: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
    resourceFieldRefResourceFieldSelectorSelects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
    -

    PersistentVolumeClaimTemplate

    -

    PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    metadataObjectMetaMay contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.
    specPersistentVolumeClaimSpecThe specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here.
    -

    VolumeProjection

    -

    Projection that may be projected along with other supported volume types

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    configMapConfigMapProjectioninformation about the configMap data to project
    downwardAPIDownwardAPIProjectioninformation about the downwardAPI data to project
    secretSecretProjectioninformation about the secret data to project
    serviceAccountTokenServiceAccountTokenProjectioninformation about the serviceAccountToken data to project
    -

    ObjectFieldSelector

    -

    ObjectFieldSelector selects an APIVersioned field of an object.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    apiVersionstringVersion of the schema the FieldPath is written in terms of, defaults to "v1".
    fieldPathstringPath of the field to select in the specified API version.
    -

    ResourceFieldSelector

    -

    ResourceFieldSelector represents container resources (cpu, memory) and their output format

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    containerNamestringContainer name: required for volumes, optional for env vars
    divisorQuantitySpecifies the output format of the exposed resources, defaults to "1"
    resourcestringRequired: resource to select
    -

    HTTPHeader

    -

    HTTPHeader describes a custom header to be used in HTTP probes

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    namestringThe header field name
    valuestringThe header field value
    -

    NodeSelectorTerm

    -

    A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    matchExpressionsArray<NodeSelectorRequirement>A list of node selector requirements by node's labels.
    matchFieldsArray<NodeSelectorRequirement>A list of node selector requirements by node's fields.
    -

    ConfigMapProjection

    -

    Adapts a ConfigMap into a projected volume. The contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    itemsArray<KeyToPath>If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.
    namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
    optionalbooleanSpecify whether the ConfigMap or its keys must be defined
    -

    DownwardAPIProjection

    -

    Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.

    -

    Fields

    - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    itemsArray<DownwardAPIVolumeFile>Items is a list of DownwardAPIVolume file
    -

    SecretProjection

    -

    Adapts a secret into a projected volume. The contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.

    -
    -Examples with this field (click to open) -
    - -
    - -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    itemsArray<KeyToPath>If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.
    namestringName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
    optionalbooleanSpecify whether the Secret or its key must be defined
    -

    ServiceAccountTokenProjection

    -

    ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise).

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    audiencestringAudience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.
    expirationSecondsintegerExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.
    pathstringPath is the path relative to the mount point of the file to project the token into.
    -

    NodeSelectorRequirement

    -

    A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

    -

    Fields

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameField TypeDescription
    keystringThe label key that the selector applies to.
    operatorstringRepresents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. Possible enum values: - "DoesNotExist" - "Exists" - "Gt" - "In" - "Lt" - "NotIn"
    valuesArray< string >An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
    - - - - -

    Comments

    - - - - -
    -
    - - - - - Back to top - - - - - - - -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/high-availability/index.html b/high-availability/index.html index 8b74de7f027b..46659f0090b5 100644 --- a/high-availability/index.html +++ b/high-availability/index.html @@ -1,3995 +1,68 @@ - - - - - - - - - - - - - High-Availability (HA) - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + High-Availability (HA) - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    High-Availability (HA)

    -

    Workflow Controller

    -

    Before v3.0, only one controller could run at once. (If it crashed, Kubernetes would start another pod.)

    -
    -

    v3.0

    -
    -

    For many users, a short loss of workflow service may be acceptable - the new controller will just continue running -workflows if it restarts. However, with high service guarantees, new pods may take too long to start running workflows. -You should run two replicas, and one of which will be kept on hot-standby.

    -

    A voluntary pod disruption can cause both replicas to be replaced at the same time. You should use a Pod Disruption -Budget to prevent this and Pod Priority to recover faster from an involuntary pod disruption:

    - -

    Argo Server

    -
    -

    v2.6

    -
    -

    Run a minimum of two replicas, typically three, should be run, otherwise it may be possible that API and webhook requests are dropped.

    - - - - - -

    Comments

    - - +

    High-Availability (HA) - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/http-template/index.html b/http-template/index.html index a6418d8fc7f8..3c9403250716 100644 --- a/http-template/index.html +++ b/http-template/index.html @@ -1,4003 +1,68 @@ - - - - - - - - - - - - - HTTP Template - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + HTTP Template - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    HTTP Template

    -
    -

    v3.2 and after

    -
    -

    HTTP Template is a type of template which can execute HTTP Requests.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: http-template-
    -spec:
    -  entrypoint: main
    -  templates:
    -    - name: main
    -      steps:
    -        - - name: get-google-homepage
    -            template: http
    -            arguments:
    -              parameters: [{name: url, value: "https://www.google.com"}]
    -    - name: http
    -      inputs:
    -        parameters:
    -          - name: url
    -      http:
    -        timeoutSeconds: 20 # Default 30
    -        url: "{{inputs.parameters.url}}"
    -        method: "GET" # Default GET
    -        headers:
    -          - name: "x-header-name"
    -            value: "test-value"
    -        # Template will succeed if evaluated to true, otherwise will fail
    -        # Available variables:
    -        #  request.body: string, the request body
    -        #  request.headers: map[string][]string, the request headers
    -        #  response.url: string, the request url
    -        #  response.method: string, the request method
    -        #  response.statusCode: int, the response status code
    -        #  response.body: string, the response body
    -        #  response.headers: map[string][]string, the response headers
    -        successCondition: "response.body contains \"google\"" # available since v3.3
    -        body: "test body" # Change request body
    -
    -

    Argo Agent

    -

    HTTP Templates use the Argo Agent, which executes the requests independently of the controller. The Agent and the Workflow -Controller communicate through the WorkflowTaskSet CRD, which is created for each running Workflow that requires the use -of the Agent.

    -

    In order to use the Argo Agent, you will need to ensure that you have added the appropriate workflow RBAC to add an agent role with to Argo Workflows. An example agent role can be found in the quick-start manifests.

    - - - - -

    Comments

    - - +

    HTTP Template - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/ide-setup/index.html b/ide-setup/index.html index 029d238bc94a..25df836d43e2 100644 --- a/ide-setup/index.html +++ b/ide-setup/index.html @@ -1,4050 +1,68 @@ - - - - - - - - - - - - - IDE Set-Up - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + IDE Set-Up - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - - - - +
    +
    +
    +
    - - - - - - - - -

    IDE Set-Up

    -

    Validating Argo YAML against the JSON Schema

    -

    Argo provides a JSON Schema that enables validation of YAML resources in your IDE.

    -

    JetBrains IDEs (Community & Ultimate Editions)

    -

    YAML validation is supported natively in IDEA.

    -

    Configure your IDE to reference the Argo schema and map it to your Argo YAML files:

    -

    JetBrains IDEs Configure Schema

    -
      -
    • The schema is located here.
    • -
    • Specify a file glob pattern that locates your Argo files. The example glob here is for the Argo Github project!
    • -
    • Note that you may need to restart IDEA to pick up the changes.
    • -
    -

    That's it. Open an Argo YAML file and you should see smarter behavior, including type errors and context-sensitive auto-complete.

    -

    JetBrains IDEs Example Functionality

    -

    JetBrains IDEs (Community & Ultimate Editions) + Kubernetes Plugin

    -

    If you have the JetBrains Kubernetes Plugin -installed in your IDE, the validation can be configured in the Kubernetes plugin settings -instead of using the internal JSON schema file validator.

    -

    JetBrains IDEs Configure Schema with Kubernetes Plugin

    -

    Unlike the previous JSON schema validation method, the plugin detects the necessary validation -based on Kubernetes resource definition keys and does not require a file glob pattern. -Like the previously described method:

    -
      -
    • The schema is located here.
    • -
    • Note that you may need to restart IDEA to pick up the changes.
    • -
    -

    VSCode

    -

    The Red Hat YAML plugin will provide error highlighting and auto-completion for Argo resources.

    -

    Install the Red Hat YAML plugin in VSCode and open extension settings:

    -

    VSCode Install Plugin

    -

    Open the YAML schema settings:

    -

    VSCode YAML Schema Settings

    -

    Add the Argo schema setting yaml.schemas:

    -

    VSCode Specify Argo Schema

    -
      -
    • The schema is located here.
    • -
    • Specify a file glob pattern that locates your Argo files. The example glob here is for the Argo Github project!
    • -
    • Note that other defined schema with overlapping glob patterns may cause errors.
    • -
    -

    That's it. Open an Argo YAML file and you should see smarter behavior, including type errors and context-sensitive auto-complete.

    -

    VScode Example Functionality

    - - - - -

    Comments

    - - +

    IDE Set-Up - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/index.html b/index.html index 2f194fd1ccb6..fe59df6f0497 100644 --- a/index.html +++ b/index.html @@ -1,4254 +1,68 @@ - - - - - - - - - - - - - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - - - - +
    +
    +
    +
    - - - - - - - - -

    Argo Workflows

    -

    slack -CII Best Practices -Twitter Follow

    -

    What is Argo Workflows?

    -

    Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. Argo -Workflows is implemented as a Kubernetes CRD (Custom Resource Definition).

    -
      -
    • Define workflows where each step in the workflow is a container.
    • -
    • Model multi-step workflows as a sequence of tasks or capture the dependencies between tasks using a directed acyclic - graph (DAG).
    • -
    • Easily run compute intensive jobs for machine learning or data processing in a fraction of the time using Argo - Workflows on Kubernetes.
    • -
    -

    Argo is a Cloud Native Computing Foundation (CNCF) graduated project.

    -

    Use Cases

    - -

    Why Argo Workflows?

    -
      -
    • Argo Workflows is the most popular workflow execution engine for Kubernetes.
    • -
    • Light-weight, scalable, and easier to use.
    • -
    • Designed from the ground up for containers without the overhead and limitations of legacy VM and server-based - environments.
    • -
    • Cloud agnostic and can run on any Kubernetes cluster.
    • -
    -

    Read what people said in our latest survey

    -

    Try Argo Workflows

    -

    Access the demo environment (login using Github)

    -

    Screenshot

    -

    Who uses Argo Workflows?

    -

    About 200+ organizations are officially using Argo Workflows

    -

    Ecosystem

    -

    Just some of the projects that use or rely on Argo Workflows (complete list here):

    - -

    Client Libraries

    -

    Check out our Java, Golang and Python clients.

    -

    Quickstart

    - -

    Documentation

    -

    View the docs

    -

    Features

    -

    An incomplete list of features Argo Workflows provide:

    -
      -
    • UI to visualize and manage Workflows
    • -
    • Artifact support (S3, Artifactory, Alibaba Cloud OSS, Azure Blob Storage, HTTP, Git, GCS, raw)
    • -
    • Workflow templating to store commonly used Workflows in the cluster
    • -
    • Archiving Workflows after executing for later access
    • -
    • Scheduled workflows using cron
    • -
    • Server interface with REST API (HTTP and GRPC)
    • -
    • DAG or Steps based declaration of workflows
    • -
    • Step level input & outputs (artifacts/parameters)
    • -
    • Loops
    • -
    • Parameterization
    • -
    • Conditionals
    • -
    • Timeouts (step & workflow level)
    • -
    • Retry (step & workflow level)
    • -
    • Resubmit (memoized)
    • -
    • Suspend & Resume
    • -
    • Cancellation
    • -
    • K8s resource orchestration
    • -
    • Exit Hooks (notifications, cleanup)
    • -
    • Garbage collection of completed workflow
    • -
    • Scheduling (affinity/tolerations/node selectors)
    • -
    • Volumes (ephemeral/existing)
    • -
    • Parallelism limits
    • -
    • Daemoned steps
    • -
    • DinD (docker-in-docker)
    • -
    • Script steps
    • -
    • Event emission
    • -
    • Prometheus metrics
    • -
    • Multiple executors
    • -
    • Multiple pod and workflow garbage collection strategies
    • -
    • Automatically calculated resource usage per step
    • -
    • Java/Golang/Python SDKs
    • -
    • Pod Disruption Budget support
    • -
    • Single-sign on (OAuth2/OIDC)
    • -
    • Webhook triggering
    • -
    • CLI
    • -
    • Out-of-the box and custom Prometheus metrics
    • -
    • Windows container support
    • -
    • Embedded widgets
    • -
    • Multiplex log viewer
    • -
    -

    Community Meetings

    -

    We host monthly community meetings where we and the community showcase demos and discuss the current and future state of -the project. Feel free to join us! For Community Meeting information, minutes and recordings -please see here.

    -

    Participation in the Argo Workflows project is governed by -the CNCF Code of Conduct

    -

    Community Blogs and Presentations

    - -

    Project Resources

    - -

    Security

    -

    See SECURITY.md.

    - - - - -

    Comments

    - - +

    Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/inline-templates/index.html b/inline-templates/index.html index bf4e35b12b5d..7075e36b0fa1 100644 --- a/inline-templates/index.html +++ b/inline-templates/index.html @@ -1,3925 +1,68 @@ - - - - - - - - - - - - - Inline Templates - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Inline Templates - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Inline Templates

    -
    -

    v3.2 and after

    -
    -

    You can inline other templates within DAG and steps.

    -

    Examples:

    - -
    -

    Warning

    -

    You can only inline once. Inline a DAG within a DAG will not work.

    -
    - - - - -

    Comments

    - - +

    Inline Templates - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/installation/index.html b/installation/index.html index 97bc1e59f736..aeec60e9b25c 100644 --- a/installation/index.html +++ b/installation/index.html @@ -1,4090 +1,68 @@ - - - - - - - - - - - - - Installation - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Installation - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    - -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Installation

    -

    Non-production installation

    -

    If you just want to try out Argo Workflows in a non-production environment (including on desktop via minikube/kind/k3d etc) follow the quick-start guide.

    -

    Production installation

    -

    Installation Methods

    -

    Official release manifests

    -

    To install Argo Workflows, navigate to the releases page and find the release you wish to use (the latest full release is preferred). Scroll down to the Controller and Server section and execute the kubectl commands.

    -

    You can use Kustomize to patch your preferred configurations on top of the base manifest.

    -

    ⚠️ If you are using GitOps, never use Kustomize remote base: this is dangerous. Instead, copy the manifests into your Git repo.

    -

    ⚠️ latest is tip, not stable. Never run it in production.

    -

    Argo Workflows Helm Chart

    -

    You can install Argo Workflows using the community maintained Helm charts.

    -

    Installation options

    -

    Determine your base installation option.

    -
      -
    • A cluster install will watch and execute workflows in all namespaces. This is the default installation option when installing using the official release manifests.
    • -
    • A namespace install only executes workflows in the namespace it is installed in (typically argo). Look for namespace-install.yaml in the release assets.
    • -
    • A managed namespace install: only executes workflows in a separate namespace from the one it is installed in. See Managed Namespace for more details.
    • -
    -

    Additional installation considerations

    -

    Review the following:

    - - - - - -

    Comments

    - - +

    Installation - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/intermediate-inputs/index.html b/intermediate-inputs/index.html index 255132d93a03..0980ab5ec2ea 100644 --- a/intermediate-inputs/index.html +++ b/intermediate-inputs/index.html @@ -1,4138 +1,68 @@ - - - - - - - - - - - - - Intermediate Parameters - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Intermediate Parameters - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Intermediate Parameters

    -
    -

    v3.4 and after

    -
    -

    Traditionally, Argo workflows has supported input parameters from UI only when the workflow starts, -and after that, it's pretty much on autopilot. But, there are a lot of use cases where human interaction is required.

    -

    This interaction is in the form of providing input text in the middle of the workflow, choosing from a dropdown of the options which a workflow step itself is intelligently generating.

    -

    A similar feature which you can see in jenkins is pipeline-input-step

    -

    Example use cases include:

    -
      -
    • A human approval before doing something in production environment.
    • -
    • Programmatic generation of a list of inputs from which the user chooses. -Choosing from a list of available databases which the workflow itself is generating.
    • -
    -

    This feature is achieved via suspend template.

    -

    The workflow will pause at a Suspend node, and user will be able to update parameters using fields type text or dropdown.

    -

    Intermediate Parameters Approval Example

    -
      -
    • The below example shows static enum values approval step.
    • -
    • The user will be able to choose between [YES, NO] which will be used in subsequent steps.
    • -
    -

    Approval Example Demo

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: intermediate-parameters-cicd-
    -spec:
    -  entrypoint: cicd-pipeline
    -  templates:
    -    - name: cicd-pipeline
    -      steps:
    -          - - name: deploy-pre-prod
    -              template: deploy
    -          - - name: approval
    -              template: approval
    -          - - name: deploy-prod
    -              template: deploy
    -              when: '{{steps.approval.outputs.parameters.approve}} == YES'
    -    - name: approval
    -      suspend: {}
    -      inputs:
    -          parameters:
    -            - name: approve
    -              default: 'NO'
    -              enum:
    -                  - 'YES'
    -                  - 'NO'
    -              description: >-
    -                Choose YES to continue workflow and deploy to production
    -      outputs:
    -          parameters:
    -            - name: approve
    -              valueFrom:
    -                  supplied: {}
    -    - name: deploy
    -      container:
    -          image: 'argoproj/argosay:v2'
    -          command:
    -            - /argosay
    -          args:
    -            - echo
    -            - deploying
    -
    -

    Intermediate Parameters DB Schema Update Example

    -
      -
    • The below example shows programmatic generation of enum values.
    • -
    • The generate-db-list template generates an output called db_list.
    • -
    • This output is of type json.
    • -
    • Since this json has a key called enum, with an array of options, the UI will parse this and display it as a dropdown.
    • -
    • The output can be any string also, in which case the UI will display it as a text field. Which the user can later edit.
    • -
    -

    DB Schema Update Example Demo

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: intermediate-parameters-db-
    -spec:
    -  entrypoint: db-schema-update
    -  templates:
    -      - name: db-schema-update
    -        steps:
    -          - - name: generate-db-list
    -              template: generate-db-list
    -          - - name: choose-db
    -              template: choose-db
    -              arguments:
    -                parameters:
    -                  - name: db_name
    -                    value: '{{steps.generate-db-list.outputs.parameters.db_list}}'
    -          - - name: update-schema
    -              template: update-schema
    -              arguments:
    -                parameters:
    -                  - name: db_name
    -                    value: '{{steps.choose-db.outputs.parameters.db_name}}'
    -      - name: generate-db-list
    -        outputs:
    -          parameters:
    -            - name: db_list
    -              valueFrom:
    -                path: /tmp/db_list.txt
    -        container:
    -          name: main
    -          image: 'argoproj/argosay:v2'
    -          command:
    -            - sh
    -            - '-c'
    -          args:
    -            - >-
    -              echo "{\"enum\": [\"db1\", \"db2\", \"db3\"]}" | tee /tmp/db_list.txt
    -      - name: choose-db
    -        inputs:
    -          parameters:
    -            - name: db_name
    -              description: >-
    -                Choose DB to update a schema
    -        outputs:
    -          parameters:
    -            - name: db_name
    -              valueFrom:
    -                supplied: {}
    -        suspend: {}
    -      - name: update-schema
    -        inputs:
    -          parameters:
    -            - name: db_name
    -        container:
    -          name: main
    -          image: 'argoproj/argosay:v2'
    -          command:
    -            - sh
    -            - '-c'
    -          args:
    -            - echo Updating DB {{inputs.parameters.db_name}}
    -
    -

    Some Important Details

    -
      -
    • The suspended node should have the SAME parameters defined in inputs.parameters and outputs.parameters.
    • -
    • All the output parameters in the suspended node should have valueFrom.supplied: {}
    • -
    • The selected values will be available at <SUSPENDED_NODE>.outputs.parameters.<PARAMETER_NAME>
    • -
    - - - - -

    Comments

    - - +

    Intermediate Parameters - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/key-only-artifacts/index.html b/key-only-artifacts/index.html index 88d6e9130eea..66aed8159725 100644 --- a/key-only-artifacts/index.html +++ b/key-only-artifacts/index.html @@ -1,3966 +1,68 @@ - - - - - - - - - - - - - Key-Only Artifacts - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Key-Only Artifacts - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Key-Only Artifacts

    -
    -

    v3.0 and after

    -
    -

    A key-only artifact is an input or output artifact where you only specify the key, omitting the bucket, secrets etc. When these are omitted, the bucket/secrets from the configured artifact repository is used.

    -

    This allows you to move the configuration of the artifact repository out of the workflow specification.

    -

    This is closely related to artifact repository ref. You'll want to use them together for maximum benefit.

    -

    This should probably be your default if you're using v3.0:

    -
      -
    • Reduces the size of workflows (improved performance).
    • -
    • User owned artifact repository set-up configuration (simplified management).
    • -
    • Decouples the artifact location configuration from the workflow. Allowing you to re-configure the artifact repository without changing your workflows or templates.
    • -
    -

    Example:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: key-only-artifacts-
    -spec:
    -  entrypoint: main
    -  templates:
    -    - name: main
    -      dag:
    -        tasks:
    -          - name: generate
    -            template: generate
    -          - name: consume
    -            template: consume
    -            dependencies:
    -              - generate
    -    - name: generate
    -      container:
    -        image: argoproj/argosay:v2
    -        args: [ echo, hello, /mnt/file ]
    -      outputs:
    -        artifacts:
    -          - name: file
    -            path: /mnt/file
    -            s3:
    -              key: my-file
    -    - name: consume
    -      container:
    -        image: argoproj/argosay:v2
    -        args: [cat, /tmp/file]
    -      inputs:
    -        artifacts:
    -          - name: file
    -            path: /tmp/file
    -            s3:
    -              key: my-file
    -
    -
    -

    Warning

    -

    The location data is not longer stored in /status/nodes. Any tooling that relies on this will need to be updated.

    -
    - - - - -

    Comments

    - - +

    Key-Only Artifacts - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/kubectl/index.html b/kubectl/index.html index 9c038643093e..9a9645226208 100644 --- a/kubectl/index.html +++ b/kubectl/index.html @@ -1,3918 +1,68 @@ - - - - - - - - - - - - - kubectl - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + kubectl - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    kubectl

    -

    You can also create Workflows directly with kubectl. -However, the Argo CLI offers extra features that kubectl does not, such as YAML validation, workflow visualization, parameter passing, retries and resubmits, suspend and resume, and more.

    -
    kubectl create -n argo -f https://raw.githubusercontent.com/argoproj/argo-workflows/main/examples/hello-world.yaml
    -kubectl get wf -n argo
    -kubectl get wf hello-world-xxx -n argo
    -kubectl get po -n argo --selector=workflows.argoproj.io/workflow=hello-world-xxx
    -kubectl logs hello-world-yyy -c main -n argo
    -
    - - - - -

    Comments

    - - +

    kubectl - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/lifecyclehook/index.html b/lifecyclehook/index.html index c9883e567c3c..17a236ac16a4 100644 --- a/lifecyclehook/index.html +++ b/lifecyclehook/index.html @@ -1,4058 +1,68 @@ - - - - - - - - - - - - - Lifecycle-Hook - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Lifecycle-Hook - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Lifecycle-Hook

    -
    -

    v3.3 and after

    -
    -

    Introduction

    -

    A LifecycleHook triggers an action based on a conditional expression or on completion of a step or template. It is configured either at the workflow-level or template-level, for instance as a function of the workflow.status or steps.status, respectively. A LifecycleHook executes during execution time and executes once. It will execute in parallel to its step or template once the expression is satisfied.

    -

    In other words, a LifecycleHook functions like an exit handler with a conditional expression. You must not name a LifecycleHook exit or it becomes an exit handler; otherwise the hook name has no relevance.

    -

    Workflow-level LifecycleHook: Executes the template when a configured expression is met during the workflow.

    - -

    Template-level Lifecycle-Hook: Executes the template when a configured expression is met during the step in which it is defined.

    - -

    Supported conditions

    - -

    Unsupported conditions

    -
      -
    • outputs are not usable since LifecycleHook executes during execution time and outputs are not produced until the step is completed. You can use outputs from previous steps, just not the one you're hooking into. If you'd like to use outputs create an exit handler instead - all the status variable are available there so you can still conditionally decide what to do.
    • -
    -

    Notification use case

    -

    A LifecycleHook can be used to configure a notification depending on a workflow status change or template status change, like the example below:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    - generateName: lifecycle-hook-
    -spec:
    - entrypoint: main
    - hooks:
    -   exit:
    -     template: http
    -   running:
    -     expression: workflow.status == "Running"
    -     template: http
    - templates:
    -   - name: main
    -     steps:
    -       - - name: step1
    -           template: heads
    -
    -   - name: heads
    -     container:
    -       image: alpine:3.6
    -       command: [sh, -c]
    -       args: ["echo \"it was heads\""]
    -
    -   - name: http
    -     http:
    -       url: http://dummy.restapiexample.com/api/v1/employees
    -
    -
    -

    Put differently, an exit handler is like a workflow-level LifecycleHook with an expression of workflow.status == "Succeeded" or workflow.status == "Failed" or workflow.status == "Error".

    -
    - - - - -

    Comments

    - - +

    Lifecycle-Hook - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/links/index.html b/links/index.html index d10baf7221cc..b1547b125d58 100644 --- a/links/index.html +++ b/links/index.html @@ -1,3947 +1,68 @@ - - - - - - - - - - - - - Links - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Links - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Links

    -
    -

    v2.7 and after

    -
    -

    You can configure Argo Server to show custom links:

    -
      -
    • A "Get Help" button in the bottom right of the window linking to you organization help pages or chat room.
    • -
    • Deep-links to your facilities (e.g. logging facility) in the UI for both the workflow and each workflow pod.
    • -
    • Adds a button to the top of workflow view to navigate to customized views.
    • -
    -

    Links can contain placeholder variables. Placeholder variables are indicated by the dollar sign and curly braces: ${variable}.

    -

    These are the commonly used variables:

    -
      -
    • ${metadata.namespace}: Kubernetes namespace of the current workflow / pod / event source / sensor
    • -
    • ${metadata.name}: Name of the current workflow / pod / event source / sensor
    • -
    • ${status.startedAt}: Start time-stamp of the workflow / pod, in the format of 2021-01-01T10:35:56Z
    • -
    • ${status.finishedAt}: End time-stamp of the workflow / pod, in the format of 2021-01-01T10:35:56Z. If the workflow/pod is still running, this variable will be null
    • -
    -

    See workflow-controller-configmap.yaml for a complete example

    -
    -

    v3.1 and after

    -
    -

    Epoch time-stamps are available now. These are useful if we want to add links to logging facilities like Grafana -or DataDog, as they support Unix epoch time-stamp formats as URL -parameters:

    -
      -
    • ${status.startedAtEpoch}: Start time-stamp of the workflow/pod, in the Unix epoch time format in milliseconds, e.g. 1609497000000.
    • -
    • ${status.finishedAtEpoch}: End time-stamp of the workflow/pod, in the Unix epoch time format in milliseconds, e.g. 1609497000000. If the workflow/pod is still running, this variable will represent the current time.
    • -
    -
    -

    v3.1 and after

    -
    -

    In addition to the above variables, we can now access all workflow fields under ${workflow}.

    -

    For example, one may find it useful to define a custom label in the workflow and access it by ${workflow.metadata.labels.custom_label_name}

    -

    We can also access workflow fields in a pod link. For example, ${workflow.metadata.name} returns -the name of the workflow instead of the name of the pod. If the field doesn't exist on the workflow then the value will be an empty string.

    - - - - -

    Comments

    - - +

    Links - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/managed-namespace/index.html b/managed-namespace/index.html index fc6e47a2055a..e585b17ea14a 100644 --- a/managed-namespace/index.html +++ b/managed-namespace/index.html @@ -1,3937 +1,68 @@ - - - - - - - - - - - - - Managed Namespace - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Managed Namespace - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Managed Namespace

    -
    -

    v2.5 and after

    -
    -

    You can install Argo in either namespace scoped or cluster scoped configurations. -The main difference is whether you install Roles or ClusterRoles, respectively.

    -

    In namespace scoped configuration, you must run both the Workflow Controller and Argo Server using --namespaced. -If you want to run workflows in a separate namespace, add --managed-namespace as well. -(In cluster scoped configuration, don't include --namespaced or --managed-namespace.)

    -

    For example:

    -
          - args:
    -        - --configmap
    -        - workflow-controller-configmap
    -        - --executor-image
    -        - argoproj/workflow-controller:v2.5.1
    -        - --namespaced
    -        - --managed-namespace
    -        - default
    -
    -

    Please note that both cluster scoped and namespace scoped configurations require "admin" roles to install because Argo's Custom Resource Definitions (CRDs) must be created (CRDs are cluster scoped objects).

    -
    -

    Example Use Case

    -

    You can use a managed namespace install if you want some users or services to run Workflows without granting them privileges in the namespace where Argo Workflows is installed. -For example, if you only run CI/CD Workflows that are maintained by the same team that manages the Argo Workflows installation, you may want a namespace install. -But if all the Workflows are run by a separate data science team, you may want to give them a "data-science-workflows" namespace and use a managed namespace install of Argo Workflows in another namespace.

    -
    - - - - -

    Comments

    - - +

    Managed Namespace - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/manually-create-secrets/index.html b/manually-create-secrets/index.html index 7a8e21731576..9a6aaaacdae5 100644 --- a/manually-create-secrets/index.html +++ b/manually-create-secrets/index.html @@ -1,3997 +1,68 @@ - - - - - - - - - - - - - Service Account Secrets - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Service Account Secrets - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Service Account Secrets

    -

    As of Kubernetes v1.24, secrets are no longer automatically created for service accounts.

    -

    You must create a secret manually.

    -

    You must also make the secret discoverable. -You have two options:

    -

    Option 1 - Discovery By Name

    -

    Name your secret ${serviceAccountName}.service-account-token:

    -
    apiVersion: v1
    -kind: Secret
    -metadata:
    -  name: default.service-account-token
    -  annotations:
    -    kubernetes.io/service-account.name: default
    -type: kubernetes.io/service-account-token
    -
    -

    This option is simpler than option 2, as you can create the secret and make it discoverable by name at the same time.

    -

    Option 2 - Discovery By Annotation

    -

    Annotate the service account with the secret name:

    -
    apiVersion: v1
    -kind: ServiceAccount
    -metadata:
    -  name: default
    -  annotations:
    -    workflows.argoproj.io/service-account-token.name: my-token
    -
    -

    This option is useful when the secret already exists, or the service account has a very long name.

    - - - - -

    Comments

    - - +

    Service Account Secrets - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/memoization/index.html b/memoization/index.html index fa6f21e6df12..9e46febda156 100644 --- a/memoization/index.html +++ b/memoization/index.html @@ -1,4051 +1,68 @@ - - - - - - - - - - - - - Step Level Memoization - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Step Level Memoization - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Step Level Memoization

    -
    -

    v2.10 and after

    -
    -

    Introduction

    -

    Workflows often have outputs that are expensive to compute. -Memoization reduces cost and workflow execution time by recording the result of previously run steps: -it stores the outputs of a template into a specified cache with a variable key.

    -

    Prior to version 3.5 memoization only works for steps which have outputs, if you attempt to use it on steps which do not it should not work (there are some cases where it does, but they shouldn't). It was designed for 'pure' steps, where the purpose of running the step is to calculate some outputs based upon the step's inputs, and only the inputs. Pure steps should not interact with the outside world, but workflows won't enforce this on you.

    -

    If you are using workflows prior to version 3.5 you should look at the work avoidance technique instead of memoization if your steps don't have outputs.

    -

    In version 3.5 or later all steps can be memoized, whether or not they have outputs.

    -

    Cache Method

    -

    Currently, the cached data is stored in config-maps. -This allows you to easily manipulate cache entries manually through kubectl and the Kubernetes API without having to go through Argo. -All cache config-maps must have the label workflows.argoproj.io/configmap-type: Cache to be used as a cache. This prevents accidental access to other important config-maps in the system

    -

    Using Memoization

    -

    Memoization is set at the template level. You must specify a key, which can be static strings but more often depend on inputs. -You must also specify a name for the config-map cache. -Optionally you can set a maxAge in seconds or hours (e.g. 180s, 24h) to define how long should it be considered valid. If an entry is older than the maxAge, it will be ignored.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -   generateName: memoized-workflow-
    -spec:
    -   entrypoint: whalesay
    -   templates:
    -      - name: whalesay
    -        memoize:
    -           key: "{{inputs.parameters.message}}"
    -           maxAge: "10s"
    -           cache:
    -              configMap:
    -                 name: whalesay-cache
    -
    -

    Find a simple example for memoization here.

    -
    -

    Note

    -

    In order to use memoization it is necessary to add the verbs create and update to the configmaps resource for the appropriate (cluster) roles. In the case of a cluster install the argo-cluster-role cluster role should be updated, whilst for a namespace install the argo-role role should be updated.

    -
    -

    FAQ

    -
      -
    1. If you see errors like error creating cache entry: ConfigMap \"reuse-task\" is invalid: []: Too long: must have at most 1048576 characters, - this is due to the 1MB limit placed on the size of ConfigMap. - Here are a couple of ways that might help resolve this:
        -
      • Delete the existing ConfigMap cache or switch to use a different cache.
      • -
      • Reduce the size of the output parameters for the nodes that are being memoized.
      • -
      • Split your cache into different memoization keys and cache names so that each cache entry is small.
      • -
      -
    2. -
    3. My step isn't getting memoized, why not? - If you are running workflows <3.5 ensure that you have specified at least one output on the step.
    4. -
    - - - - -

    Comments

    - - +

    Step Level Memoization - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/metrics/index.html b/metrics/index.html index 414ac2c79f13..b54416c4a758 100644 --- a/metrics/index.html +++ b/metrics/index.html @@ -1,4619 +1,68 @@ - - - - - - - - - - - - - Prometheus Metrics - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Prometheus Metrics - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - - - - +
    +
    +
    +
    - - - - - - - - -

    Prometheus Metrics

    -
    -

    v2.7 and after

    -
    -

    Introduction

    -

    Argo emits a certain number of controller metrics that inform on the state of the controller at any given time. Furthermore, -users can also define their own custom metrics to inform on the state of their Workflows.

    -

    Custom Prometheus metrics can be defined to be emitted on a Workflow- and Template-level basis. These can be useful -for many cases; some examples:

    -
      -
    • Keeping track of the duration of a Workflow or Template over time, and setting an alert if it goes beyond a threshold
    • -
    • Keeping track of the number of times a Workflow or Template fails over time
    • -
    • Reporting an important internal metric, such as a model training score or an internal error rate
    • -
    -

    Emitting custom metrics with Argo is easy, but it's important to understand what makes a good Prometheus metric and the -best way to define metrics in Argo to avoid problems such as cardinality explosion.

    -

    Metrics and metrics in Argo

    -

    There are two kinds of metrics emitted by Argo: controller metrics and custom metrics.

    -

    Controller metrics

    -

    Metrics that inform on the state of the controller; i.e., they answer the question "What is the state of the controller right now?" -Default controller metrics can be scraped from service workflow-controller-metrics at the endpoint <host>:9090/metrics

    -

    Custom metrics

    -

    Metrics that inform on the state of a Workflow, or a series of Workflows. These custom metrics are defined by the user in the Workflow spec.

    -

    Emitting custom metrics is the responsibility of the emitter owner. Since the user defines Workflows in Argo, the user is responsible -for emitting metrics correctly.

    -

    What is and isn't a Prometheus metric

    -

    Prometheus metrics should be thought of as ephemeral data points of running processes; i.e., they are the answer to -the question "What is the state of my system right now?". Metrics should report things such as:

    -
      -
    • a counter of the number of times a workflow or steps has failed, or
    • -
    • a gauge of workflow duration, or
    • -
    • an average of an internal metric such as a model training score or error rate.
    • -
    -

    Metrics are then routinely scraped and stored and -- when they are correctly designed -- they can represent time series. -Aggregating the examples above over time could answer useful questions such as:

    -
      -
    • How has the error rate of this workflow or step changed over time?
    • -
    • How has the duration of this workflow changed over time? Is the current workflow running for too long?
    • -
    • Is our model improving over time?
    • -
    -

    Prometheus metrics should not be thought of as a store of data. Since metrics should only report the state of the system -at the current time, they should not be used to report historical data such as:

    -
      -
    • the status of an individual instance of a workflow, or
    • -
    • how long a particular instance of a step took to run.
    • -
    -

    Metrics are also ephemeral, meaning there is no guarantee that they will be persisted for any amount of time. If you need -a way to view and analyze historical data, consider the workflow archive or reporting to logs.

    -

    Default Controller Metrics

    -

    Metrics for the Four Golden Signals are:

    -
      -
    • Latency: argo_workflows_queue_latency
    • -
    • Traffic: argo_workflows_count and argo_workflows_queue_depth_count
    • -
    • Errors: argo_workflows_count and argo_workflows_error_count
    • -
    • Saturation: argo_workflows_workers_busy and argo_workflows_workflow_condition
    • -
    - - -

    argo_pod_missing

    -

    Pods were not seen. E.g. by being deleted by Kubernetes. You should only see this under high load.

    -
    -

    Note

    -

    This metric's name starts with argo_ not argo_workflows_.

    -
    -

    argo_workflows_count

    -

    Number of workflow in each phase. The Running count does not mean that a workflows pods are running, just that the controller has scheduled them. A workflow can be stuck in Running with pending pods for a long time.

    -

    argo_workflows_error_count

    -

    A count of certain errors incurred by the controller.

    -

    argo_workflows_k8s_request_total

    -

    Number of API requests sent to the Kubernetes API.

    -

    argo_workflows_operation_duration_seconds

    -

    A histogram of durations of operations. An operation is a single workflow reconciliation loop within the workflow-controller. It's the time for the controller to process a single workflow after it has been read from the cluster and is a measure of the performance of the controller affected by the complexity of the workflow.

    -

    argo_workflows_pods_count

    -

    It is possible for a workflow to start, but no pods be running (e.g. cluster is too busy to run them). This metric sheds light on actual work being done.

    -

    argo_workflows_queue_adds_count

    -

    The number of additions to the queue of workflows or cron workflows.

    -

    argo_workflows_queue_depth_count

    -

    The depth of the queue of workflows or cron workflows to be processed by the controller.

    -

    argo_workflows_queue_latency

    -

    The time workflows or cron workflows spend in the queue waiting to be processed.

    -

    argo_workflows_workers_busy

    -

    The number of workers that are busy.

    -

    argo_workflows_workflow_condition

    -

    The number of workflow with different conditions. This will tell you the number of workflows with running pods.

    -

    argo_workflows_workflows_processed_count

    -

    A count of all Workflow updates processed by the controller.

    -

    Metric types

    -

    Please see the Prometheus docs on metric types.

    -

    How metrics work in Argo

    -

    In order to analyze the behavior of a workflow over time, we need to be able to link different instances -(i.e. individual executions) of a workflow together into a "series" for the purposes of emitting metrics. We do so by linking them together -with the same metric descriptor.

    -

    In Prometheus, a metric descriptor is defined as a metric's name and its key-value labels. For example, for a metric -tracking the duration of model execution over time, a metric descriptor could be:

    -

    argo_workflows_model_exec_time{model_name="model_a",phase="validation"}

    -

    This metric then represents the amount of time that "Model A" took to train in the phase "Validation". It is important -to understand that the metric name and its labels form the descriptor: argo_workflows_model_exec_time{model_name="model_b",phase="validation"} -is a different metric (and will track a different "series" altogether).

    -

    Now, whenever we run our first workflow that validates "Model A" a metric with the amount of time it took it to do so will -be created and emitted. For each subsequent time that this happens, no new metrics will be emitted and the same metric -will be updated with the new value. Since, in effect, we are interested on the execution time of "validation" of "Model A" -over time, we are no longer interested in the previous metric and can assume it has already been scraped.

    -

    In summary, whenever you want to track a particular metric over time, you should use the same metric name and metric -labels wherever it is emitted. This is how these metrics are "linked" as belonging to the same series.

    -

    Grafana Dashboard for Argo Controller Metrics

    -

    Please see the Argo Workflows metrics Grafana dashboard.

    -

    Defining metrics

    -

    Metrics are defined in-place on the Workflow/Step/Task where they are emitted from. Metrics are always processed after -the Workflow/Step/Task completes, with the exception of real-time metrics.

    -

    Metric definitions must include a name and a help doc string. They can also include any number of labels (when -defining labels avoid cardinality explosion). Metrics with the same name must always use the same exact help string, -having different metrics with the same name, but with a different help string will cause an error (this is a Prometheus requirement).

    -

    All metrics can also be conditionally emitted by defining a when clause. This when clause works the same as elsewhere -in a workflow.

    -

    A metric must also have a type, it can be one of gauge, histogram, and counter (see below). Within -the metric type a value must be specified. This value can be either a literal value of be an Argo variable.

    -

    When defining a histogram, buckets must also be provided (see below).

    -

    Argo variables can be included anywhere in the metric spec, such as in labels, name, help, when, etc.

    -

    Metric names can only contain alphanumeric characters, _, and :.

    -

    Metric Spec

    -

    In Argo you can define a metric on the Workflow level or on the Template level. Here is an example of a Workflow -level Gauge metric that will report the Workflow duration time:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: model-training-
    -spec:
    -  entrypoint: steps
    -  metrics:
    -    prometheus:
    -      - name: exec_duration_gauge         # Metric name (will be prepended with "argo_workflows_")
    -        labels:                           # Labels are optional. Avoid cardinality explosion.
    -          - key: name
    -            value: model_a
    -        help: "Duration gauge by name"    # A help doc describing your metric. This is required.
    -        gauge:                            # The metric type. Available are "gauge", "histogram", and "counter".
    -          value: "{{workflow.duration}}"  # The value of your metric. It could be an Argo variable (see variables doc) or a literal value
    -
    -...
    -
    -

    An example of a Template-level Counter metric that will increase a counter every time the step fails:

    -
    ...
    -  templates:
    -    - name: flakey
    -      metrics:
    -        prometheus:
    -          - name: result_counter
    -            help: "Count of step execution by result status"
    -            labels:
    -              - key: name
    -                value: flakey
    -            when: "{{status}} == Failed"       # Emit the metric conditionally. Works the same as normal "when"
    -            counter:
    -              value: "1"                            # This increments the counter by 1
    -      container:
    -        image: python:alpine3.6
    -        command: ["python", -c]
    -        # fail with a 66% probability
    -        args: ["import random; import sys; exit_code = random.choice([0, 1, 1]); sys.exit(exit_code)"]
    -...
    -
    -

    A similar example of such a Counter metric that will increase for every step status

    -
    ...
    -  templates:
    -    - name: flakey
    -      metrics:
    -        prometheus:
    -          - name: result_counter
    -            help: "Count of step execution by result status"
    -            labels:
    -              - key: name
    -                value: flakey
    -              - key: status
    -                value: "{{status}}"    # Argo variable in `labels`
    -            counter:
    -              value: "1"
    -      container:
    -        image: python:alpine3.6
    -        command: ["python", -c]
    -        # fail with a 66% probability
    -        args: ["import random; import sys; exit_code = random.choice([0, 1, 1]); sys.exit(exit_code)"]
    -...
    -
    -

    Finally, an example of a Template-level Histogram metric that tracks an internal value:

    -
    ...
    -  templates:
    -    - name: random-int
    -      metrics:
    -        prometheus:
    -          - name: random_int_step_histogram
    -            help: "Value of the int emitted by random-int at step level"
    -            when: "{{status}} == Succeeded"    # Only emit metric when step succeeds
    -            histogram:
    -              buckets:                              # Bins must be defined for histogram metrics
    -                - 2.01                              # and are part of the metric descriptor.
    -                - 4.01                              # All metrics in this series MUST have the
    -                - 6.01                              # same buckets.
    -                - 8.01
    -                - 10.01
    -              value: "{{outputs.parameters.rand-int-value}}"         # References itself for its output (see variables doc)
    -      outputs:
    -        parameters:
    -          - name: rand-int-value
    -            globalName: rand-int-value
    -            valueFrom:
    -              path: /tmp/rand_int.txt
    -      container:
    -        image: alpine:latest
    -        command: [sh, -c]
    -        args: ["RAND_INT=$((1 + RANDOM % 10)); echo $RAND_INT; echo $RAND_INT > /tmp/rand_int.txt"]
    -...
    -
    -

    Real-Time Metrics

    -

    Argo supports a limited number of real-time metrics. These metrics are emitted in real-time, beginning when the step execution starts -and ending when it completes. Real-time metrics are only available on Gauge type metrics and with a limited number of variables.

    -

    To define a real-time metric simply add realtime: true to a gauge metric with a valid real-time variable. For example:

    -
      gauge:
    -    realtime: true
    -    value: "{{duration}}"
    -
    -

    Metrics endpoint

    -

    By default, metrics are emitted by the workflow-controller on port 9090 on the /metrics path. By port-forwarding to the pod you can view the metrics in your browser at http://localhost:9090/metrics:

    -

    kubectl -n argo port-forward deploy/workflow-controller 9090:9090

    -

    A metrics service is not installed as part of the default installation so you will need to add one if you wish to use a Prometheus Service Monitor:

    -
    cat <<EOF | kubectl apply -f -
    -apiVersion: v1
    -kind: Service
    -metadata:
    -  labels:
    -    app: workflow-controller
    -  name: workflow-controller-metrics
    -  namespace: argo
    -spec:
    -  ports:
    -  - name: metrics
    -    port: 9090
    -    protocol: TCP
    -    targetPort: 9090
    -  selector:
    -    app: workflow-controller
    ----
    -apiVersion: monitoring.coreos.com/v1
    -kind: ServiceMonitor
    -metadata:
    -  name: argo-workflows
    -  namespace: argo
    -spec:
    -  endpoints:
    -  - port: metrics
    -  selector:
    -    matchLabels:
    -      app: workflow-controller
    -EOF
    -
    -

    If you have more than one controller pod, using one as a hot-standby, you should use a headless service to ensure that each pod is being scraped so that no metrics are missed.

    -

    Metrics configuration

    -

    You can adjust various elements of the metrics configuration by changing values in the Workflow Controller Config Map.

    -
    metricsConfig: |
    -  # Enabled controls metric emission. Default is true, set "enabled: false" to turn off
    -  enabled: true
    -
    -  # Path is the path where metrics are emitted. Must start with a "/". Default is "/metrics"
    -  path: /metrics
    -
    -  # Port is the port where metrics are emitted. Default is "9090"
    -  port: 8080
    -
    -  # MetricsTTL sets how often custom metrics are cleared from memory. Default is "0", metrics are never cleared
    -  metricsTTL: "10m"
    -
    -  # IgnoreErrors is a flag that instructs prometheus to ignore metric emission errors. Default is "false"
    -  ignoreErrors: false
    -
    -  # Use a self-signed cert for TLS, default false
    -  secure: false
    -
    - - - - -

    Comments

    - - +

    Prometheus Metrics - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/node-field-selector/index.html b/node-field-selector/index.html index 54ca26251e15..f1c84c14397f 100644 --- a/node-field-selector/index.html +++ b/node-field-selector/index.html @@ -1,4079 +1,68 @@ - - - - - - - - - - - - - Node Field Selectors - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Node Field Selectors - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Node Field Selectors

    -
    -

    v2.8 and after

    -
    -

    Introduction

    -

    The resume, stop and retry Argo CLI and API commands support a --node-field-selector parameter to allow the user to select a subset of nodes for the command to apply to.

    -

    In the case of the resume and stop commands these are the nodes that should be resumed or stopped.

    -

    In the case of the retry command it allows specifying nodes that should be restarted even if they were previously successful (and must be used in combination with --restart-successful)

    -

    The format of this when used with the CLI is:

    -
    --node-field-selector=FIELD=VALUE
    -
    -

    Possible options

    -

    The field can be any of:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    FieldDescription
    displayNameDisplay name of the node. This is the name of the node as it is displayed on the CLI or UI, without considering its ancestors (see example below). This is a useful shortcut if there is only one node with the same displayName
    nameFull name of the node. This is the full name of the node, including its ancestors (see example below). Using name is necessary when two or more nodes share the same displayName and disambiguation is required.
    templateNameTemplate name of the node
    phasePhase status of the node - e.g. Running
    templateRef.nameThe name of the workflow template the node is referring to
    templateRef.templateThe template within the workflow template the node is referring to
    inputs.parameters.<NAME>.valueThe value of input parameter NAME
    -

    The operator can be '=' or '!='. Multiple selectors can be combined with a comma, in which case they are anded together.

    -

    Examples

    -

    To filter for nodes where the input parameter 'foo' is equal to 'bar':

    -
    --node-field-selector=inputs.parameters.foo.value=bar
    -
    -

    To filter for nodes where the input parameter 'foo' is equal to 'bar' and phase is not running:

    -
    --node-field-selector=foo1=bar1,phase!=Running
    -
    -

    Consider the following workflow:

    -
     ● appr-promotion-ffsv4    code-release
    - ├─✔ start                 sample-template/email                 appr-promotion-ffsv4-3704914002  2s
    - ├─● app1                  wftempl1/approval-and-promotion
    - │ ├─✔ notification-email  sample-template/email                 appr-promotion-ffsv4-524476380   2s
    - │ └─ǁ wait-approval       sample-template/waiting-for-approval
    - ├─✔ app2                  wftempl2/promotion
    - │ ├─✔ notification-email  sample-template/email                 appr-promotion-ffsv4-2580536603  2s
    - │ ├─✔ pr-approval         sample-template/approval              appr-promotion-ffsv4-3445567645  2s
    - │ └─✔ deployment          sample-template/promote               appr-promotion-ffsv4-970728982   1s
    - └─● app3                  wftempl1/approval-and-promotion
    -   ├─✔ notification-email  sample-template/email                 appr-promotion-ffsv4-388318034   2s
    -   └─ǁ wait-approval       sample-template/waiting-for-approval
    -
    -

    Here we have two steps with the same displayName: wait-approval. To select one to suspend, we need to use their -name, either appr-promotion-ffsv4.app1.wait-approval or appr-promotion-ffsv4.app3.wait-approval. If it is not clear -what the full name of a node is, it can be found using kubectl:

    -
    $ kubectl get wf appr-promotion-ffsv4 -o yaml
    -
    -...
    -    appr-promotion-ffsv4-3235686597:
    -      boundaryID: appr-promotion-ffsv4-3079407832
    -      displayName: wait-approval                        # <- Display Name
    -      finishedAt: null
    -      id: appr-promotion-ffsv4-3235686597
    -      name: appr-promotion-ffsv4.app1.wait-approval     # <- Full Name
    -      phase: Running
    -      startedAt: "2021-01-20T17:00:25Z"
    -      templateRef:
    -        name: sample-template
    -        template: waiting-for-approval
    -      templateScope: namespaced/wftempl1
    -      type: Suspend
    -...
    -
    - - - - -

    Comments

    - - +

    Node Field Selectors - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/offloading-large-workflows/index.html b/offloading-large-workflows/index.html index 8888b934a42c..72ee2f6dd013 100644 --- a/offloading-large-workflows/index.html +++ b/offloading-large-workflows/index.html @@ -1,4032 +1,68 @@ - - - - - - - - - - - - - Offloading Large Workflows - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Offloading Large Workflows - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - - - - +
    +
    +
    +
    - - - - - - - - -

    Offloading Large Workflows

    -
    -

    v2.4 and after

    -
    -

    Argo stores workflows as Kubernetes resources (i.e. within EtcD). This creates a limit to their size as resources must be under 1MB. Each resource includes the status of each node, which is stored in the /status/nodes field for the resource. This can be over 1MB. If this happens, we try and compress the node status and store it in /status/compressedNodes. If the status is still too large, we then try and store it in an SQL database.

    -

    To enable this feature, configure a Postgres or MySQL database under persistence in your configuration and set nodeStatusOffLoad: true.

    -

    FAQ

    -

    Why aren't my workflows appearing in the database?

    -

    Offloading is expensive and often unnecessary, so we only offload when we need to. Your workflows aren't probably large enough.

    -

    Error Failed to submit workflow: etcdserver: request is too large.

    -

    You must use the Argo CLI having exported export ARGO_SERVER=....

    -

    Error offload node status is not supported

    -

    Even after compressing node statuses, the workflow exceeded the EtcD -size limit. To resolve, either enable node status offload as described -above or look for ways to reduce the size of your workflow manifest:

    -
      -
    • Use withItems or withParams to consolidate similar templates into a single parametrized template
    • -
    • Use template defaults to factor shared template options to the workflow level
    • -
    • Use workflow templates to factor frequently-used templates into separate resources
    • -
    • Use workflows of workflows to factor a large workflow into a workflow of smaller workflows
    • -
    - - - - -

    Comments

    - - +

    Offloading Large Workflows - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/plugin-directory/index.html b/plugin-directory/index.html index 76476742f1aa..7802ff433525 100644 --- a/plugin-directory/index.html +++ b/plugin-directory/index.html @@ -1,3967 +1,68 @@ - - - - - - - - - - - - - Plugin Directory - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Plugin Directory - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Plugin Directory

    -

    ⚠️ Disclaimer: We take only minimal action to verify the authenticity of plugins. Install at your own risk.

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameDescription
    HelloHello world plugin you can use as a template
    SlackExample Slack plugin
    Argo CDSync Argo CD apps, e.g. to use Argo as CI
    Volcano Job PluginExecute Volcano Job
    PythonPlugin for executing Python
    HermesSend notifications, e.g. Slack
    WASMRun Web Assembly (WASM) tasks
    Chaos Mesh PluginRun Chaos Mesh experiment
    Pull Request Build StatusSend build status of pull request to Git provider
    Atomic Workflow PluginStop the workflows which comes from the same WorkflowTemplate and have the same parameters
    AWS PluginArgo Workflows Executor Plugin for AWS Services, e.g. SageMaker Pipelines, Glue, etc.
    - - - - -

    Comments

    - - +

    Plugin Directory - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/plugins/index.html b/plugins/index.html index b07893dc0d27..9c3452b3d42b 100644 --- a/plugins/index.html +++ b/plugins/index.html @@ -1,3922 +1,68 @@ - - - - - - - - - - - - - Plugins - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Plugins - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Plugins

    -

    Plugins

    -

    Plugins allow you to extend Argo Workflows to add new capabilities.

    -
      -
    • You don't need to learn Golang, you can write in any language, including Python.
    • -
    • Simple: a plugin just responds to RPC HTTP requests.
    • -
    • You can iterate quickly by changing the plugin at runtime.
    • -
    • You can get your plugin running today, no need to wait 3-5 months for review, approval, merge and an Argo software - release.
    • -
    -

    Executor plugins can be written and installed by both users and admins.

    - - - - -

    Comments

    - - +

    Plugins - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/progress/index.html b/progress/index.html index 52487711ccb9..32dc95ab3350 100644 --- a/progress/index.html +++ b/progress/index.html @@ -1,4018 +1,68 @@ - - - - - - - - - - - - - Workflow Progress - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Workflow Progress - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Workflow Progress

    -
    -

    v2.12 and after

    -
    -

    When you run a workflow, the controller will report on its progress.

    -

    We define progress as two numbers, N/M such that 0 <= N <= M and 0 <= M.

    -
      -
    • N is the number of completed tasks.
    • -
    • M is the total number of tasks.
    • -
    -

    E.g. 0/0, 0/1 or 50/100.

    -

    Unlike estimated duration, progress is deterministic. I.e. it will be the same for each workflow, regardless of any problems.

    -

    Progress for each node is calculated as follows:

    -
      -
    1. For a pod node either 1/1 if completed or 0/1 otherwise.
    2. -
    3. For non-leaf nodes, the sum of its children.
    4. -
    -

    For a whole workflow's, progress is the sum of all its leaf nodes.

    -
    -

    Warning

    -

    M will increase during workflow run each time a node is added to the graph.

    -
    -

    Self reporting progress

    -
    -

    v3.3 and after

    -
    -

    Pods in a workflow can report their own progress during their runtime. This self reported progress overrides the -auto-generated progress.

    -

    Reporting progress works as follows:

    -
      -
    • create and write the progress to a file indicated by the env variable ARGO_PROGRESS_FILE
    • -
    • format of the progress must be N/M
    • -
    -

    The executor will read this file every 3s and if there was an update, -patch the pod annotations with workflows.argoproj.io/progress: N/M. -The controller picks this up and writes the progress to the appropriate Status properties.

    -

    Initially the progress of a workflows' pod is always 0/1. If you want to influence this, make sure to set an initial -progress annotation on the pod:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: progress-
    -spec:
    -  entrypoint: main
    -  templates:
    -    - name: main
    -      dag:
    -        tasks:
    -          - name: progress
    -            template: progress
    -    - name: progress
    -      metadata:
    -        annotations:
    -          workflows.argoproj.io/progress: 0/100
    -      container:
    -        image: alpine:3.14
    -        command: [ "/bin/sh", "-c" ]
    -        args:
    -          - |
    -            for i in `seq 1 10`; do sleep 10; echo "$(($i*10))"'/100' > $ARGO_PROGRESS_FILE; done
    -
    - - - - -

    Comments

    - - +

    Workflow Progress - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/proposals/artifact-gc-proposal/index.html b/proposals/artifact-gc-proposal/index.html index d231506fb8d7..b11ba67535ef 100644 --- a/proposals/artifact-gc-proposal/index.html +++ b/proposals/artifact-gc-proposal/index.html @@ -1,4023 +1,68 @@ - - - - - - - - - - - - - Proposal for Artifact Garbage Collection - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Proposal for Artifact Garbage Collection - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    - -
    - - +
    +
    +
    +
    - - - - - - - - -

    Proposal for Artifact Garbage Collection

    -

    Introduction

    -

    The motivation for this is to enable users to automatically have certain Artifacts specified to be automatically garbage collected.

    -

    Artifacts can be specified for Garbage Collection at different stages: OnWorkflowCompletion, OnWorkflowDeletion, OnWorkflowSuccess, OnWorkflowFailure, or Never

    -

    Proposal Specifics

    -

    Workflow Spec changes

    -
      -
    1. WorkflowSpec has an ArtifactGC structure, which consists of an ArtifactGCStrategy, as well as the optional designation of a ServiceAccount and Pod metadata (labels and annotations) to be used by the Pod doing the deletion. The ArtifactGCStrategy can be set to OnWorkflowCompletion, OnWorkflowDeletion, OnWorkflowSuccess, OnWorkflowFailure, or Never
    2. -
    3. Artifact has an ArtifactGC section which can be used to override the Workflow level.
    4. -
    -

    Workflow Status changes

    -
      -
    1. Artifact has a boolean Deleted flag
    2. -
    3. WorkflowStatus.Conditions can be set to ArtifactGCError
    4. -
    5. WorkflowStatus can include a new field ArtGCStatus which holds additional information to keep track of the state of Artifact Garbage Collection.
    6. -
    -

    How it will work

    -

    For each ArtifactGCStrategy the Controller will execute one Pod that runs in the user's namespace and deletes all artifacts pertaining to that strategy.

    -

    Option 2 Flow

    -

    Since OnWorkflowSuccess happens at the same time as OnWorkflowCompletion and OnWorkflowFailure also happens at the same time as OnWorkflowCompletion, we can consider consolidating these GC Strategies together.

    -

    We will have a new CRD type called ArtifactGCTask and use one or more of them to specify the Artifacts which the GC Pod will read and then write Status to (note individual artifacts have individual statuses). The Controller will read the Status and reflect that in the Workflow Status. The Controller will deem the ArtifactGCTasks ready to read once the Pod has completed (in success or failure).

    -

    Once the GC Pod has completed and the Workflow status has been persisted, assuming the Pod completed with Success, the Controller can delete the ArtifactGCTasks, which will cause the GC Pod to also get deleted as it will be "owned" by the ArtifactGCTasks.

    -

    The Workflow will have a Finalizer on it to prevent it from being deleted until Artifact GC has occurred. Once all deletions for all GC Strategies have occurred, the Controller will remove the Finalizer.

    -

    Failures

    -

    If a deletion fails, the Pod will retry a few times through exponential back off. Note: it will not be considered a failure if the key does not exist - the principal of idempotence will allow this (i.e. if a Pod were to get evicted and then re-run it should be okay if some artifacts were previously deleted).

    -

    Once it retries a few times, if it didn't succeed, it will end in a "Failed" state. The user will manually need to delete the ArtifactGCTasks (which will delete the GC Pod), and remove the Finalizer on the Workflow.

    -

    The Failure will be reflected in both the Workflow Conditions as well as as a Kubernetes Event (and the Artifacts that failed will have "Deleted"=false).

    -

    Alternatives Considered

    -

    For reference, these slides were presented to the Argo Contributor meeting on 7/12/22 which go through some of the alternative options that were weighed. These alternatives are explained below:

    -

    One Pod Per Artifact

    -

    The POC that was done, which uses just one Pod to delete each Artifact, was considered as an alternative for MVP (Option 1 from the slides).

    -

    This option has these benefits:

    -
      -
    • simpler in that the Pod doesn't require any additional Object to report status (e.g. ArtifactGCTask) because it simply succeeds or fails based on its exit code (whereas in Option 2 the Pod needs to report individual failure statuses for each artifact)
    • -
    • could have a very minimal Service Account which provides access to just that one artifact's location
    • -
    -

    and these drawbacks:

    -
      -
    • deletion is slower when performed by multiple Pods
    • -
    • a Workflow with thousands of artifacts causes thousands of Pods to get executed, which could overwhelm kube-scheduler and kube-apiserver.
    • -
    • if we delay the Artifact GC Pods by giving them a lower priority than the Workflow Pods, users will not get their artifacts deleted when they expect and may log bugs
    • -
    -

    Summarizing ADR statement: -"In the context of Artifact Garbage Collection, facing whether to use a separate Pod for every artifact or not, we decided not to, to achieve faster garbage collection and reduced load on K8S, accepting that we will require a new CRD type."

    -

    Service Account/IAM roles

    -

    We considered some alternatives for how to specify Service Account and/or Annotations, which are applied to give the GC Pod access (slide 12). We will have them specify this information in a new ArtifactGC section of the spec that lives on the Workflow level but can be overridden on the Artifact level (option 3 from slide). Another option considered was to just allow specification on the Workflow level (option 2 from slide) so as to reduce the complexity of the code and reduce the potential number of Pods running, but Option 3 was selected in the end to maximize flexibility.

    -

    Summarizing ADR statement: -"In the context of Artifact Garbage Collection, facing the question of how users should specify Service Account and annotations, we decided to give them the option to specify them on the Workflow level and/or override them on the Artifact level, to maximize flexibility for user needs, accepting that the code will be more complicated, and sometimes there will be many Pods running."

    -

    MVP vs post-MVP

    -

    We will start with just S3.

    -

    We can also make other determinations if it makes sense to postpone some parts for after MVP.

    -

    Workflow Spec Validation

    -

    We can reject the Workflow during validation if ArtifactGC is configured along with a non-supported storage engine (for now probably anything besides S3).

    -

    Documentation

    -

    Need to clarify certain things in our documentation:

    -
      -
    1. Users need to know that if they don't name their artifacts with unique keys, they risk the same key being deleted by one Workflow and created by another at the same time. One recommendation is to parametrize the key, e.g. {{workflow.uid}}/hello.txt.
    2. -
    3. Requirement to specify Service Account or Annotation for ArtifactGC specifically if they are needed (we won't fall back to default Workflow SA/annotations). Also, the Service Account needs to either be bound to the "agent" role or otherwise allow the same access to ArtifactGCTasks.
    4. -
    - - - - -

    Comments

    - - +

    Proposal for Artifact Garbage Collection - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    + +
    - - - - - - \ No newline at end of file diff --git a/proposals/cron-wf-improvement-proposal/index.html b/proposals/cron-wf-improvement-proposal/index.html index 82842dddf540..fbc7316a51a3 100644 --- a/proposals/cron-wf-improvement-proposal/index.html +++ b/proposals/cron-wf-improvement-proposal/index.html @@ -1,4009 +1,68 @@ - - - - - - - - - - - - - Proposal for Cron Workflows improvements - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Proposal for Cron Workflows improvements - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Proposal for Cron Workflows improvements

    -

    Introduction

    -

    Currently, CronWorkflows are a great resource if we want to run recurring tasks to infinity. However, it is missing the ability to customize it, for example define how many times a workflow should run or how to handle multiple failures. I believe argo workflows would benefit of having more configuration options for cron workflows, to allow to change its behavior based on the result of its child’s success or failures. Below I present my thoughts on how we could improve them, but also some questions and concerns on how to properly do it.

    -

    Proposal

    -

    This proposal discusses the viability of adding 2 more fields into the cron workflow configuration:

    -
    RunStrategy:
    - maxSuccess:
    - maxFailures:
    -
    -

    maxSuccess - defines how many child workflows must have success before suspending the workflow schedule

    -

    maxFailures - defines how many child workflows must fail before suspending the workflow scheduling. This may contain Failed workflows, Errored workflows or spec errors.

    -

    For example, if we want to run a workflow just once, we could just set:

    -
    RunStrategy:
    - maxSuccess: 1
    -
    -

    This configuration will make sure the controller will keep scheduling workflows until one of them finishes with success.

    -

    As another example, if we want to stop scheduling workflows when they keep failing, we could configure the CronWorkflow with:

    -
    RunStrategy:
    - maxFailures: 2
    -
    -

    This config will stop scheduling workflows if fails twice.

    -

    Total vs consecutive

    -

    One aspect that needs to be discussed is whether these configurations apply to the entire life of a cron Workflow or just in consecutive schedules. For example, if we configure a workflow to stop scheduling after 2 failures, I think it makes sense to have this applied when it fails twice consecutively. Otherwise, we can have 2 outages in different periods which will suspend the workflow. On the other hand, when configuring a workflow to run twice with success, it would make more sense to have it execute with success regardless of whether it is a consecutive success or not. If we have an outage after the first workflow succeeds, which translates into failed workflows, it should need to execute with success only once. So I think it would make sense to have:

    -
      -
    • -

      maxFailures - maximum number of consecutive failures before stopping the scheduling of a workflow

      -
    • -
    • -

      maxSuccess - maximum number of workflows with success.

      -
    • -
    -

    How to store state

    -

    Since we need to control how many child workflows had success/failure we must store state. With this some questions arise:

    -
      -
    • -

      Should we just store it through the lifetime of the controller or should we store it to a database?

      -
        -
      • Probably only makes sense if we can backup the state somewhere (like a BD). However, I don't have enough knowledge about workflow's architecture to tell how good of an idea this is.
      • -
      • -

        If a CronWorkflow gets re-applied, does it maintain or reset the number of success/failures?

        -
      • -
      • -

        I guess it should reset since a configuration change should be seen as a new start.

        -
      • -
      -
    • -
    -

    How to stop the workflow

    -

    Once the configured number of failures or successes is reached, it is necessary to stop the workflow scheduling. -I believe we have 3 options:

    -
      -
    • Delete the workflow: In my opinion, this is the worst option and goes against gitops principles.
    • -
    • Suspend it (set suspend=true): the workflow spec is changed to have the workflow suspended. I may be wrong but this conflicts with gitops as well.
    • -
    • Stop scheduling it: The workflow spec is the same. The controller needs to check if the max number of runs was already attained and skip scheduling if it did.
    • -
    -

    Option 3 seems to be the only possibility. After reaching the max configured executions, the cron workflow would exist forever but never scheduled. Maybe we could add a new status field, like Inactive and have something the UI to show it?

    -

    How to handle suspended workflows

    -

    One possible case that comes to mind is a long outage where all workflows are failing. For example, imagine a workflow that needs to download a file from some storage and for some reason that storage is down. Workflows will keep getting scheduled but they are going to fail. If they fail the number of configured maxFailures, the workflows gets stopped forever. Once the storage is back up, how can the user enable the workflow again?

    -
      -
    • Manually re-create the workflow: could be an issue if the user has multiple cron workflows
    • -
    • Instead of stopping the workflow scheduling, introduce a back-off period as suggested by #7291. Or maybe allow both configurations.
    • -
    -

    I believe option 2 would allow the user to select if they want to stop scheduling or not. If they do, when cron workflows are wrongfully halted, they will need to manually start them again. If they don't, Argo will only introduce a back-off period between schedules to avoid rescheduling workflows that are just going to fail. Spec could look something like:

    -
    RunStrategy:
    - maxSuccess:
    - maxFailures:
    -  value: # this would be optional
    -  back-off:
    -   enabled: true
    -   factor: 2
    -
    -

    With this configuration the user could configure 3 behaviors:

    -
      -
    1. set value if they wanted to stop scheduling a workflow after a maximum number of consecutive failures.
    2. -
    3. set value and back-off if they wanted to stop scheduling a workflow after a maximum number of consecutive failures but with a back-off period between each failure
    4. -
    5. set back-off if they want a back-off period between each failure but they never want to stop the workflow scheduling.
    6. -
    -

    Wrap up

    -

    I believe this feature would enhance the cron workflows to allow more specific use cases that are commonly requested by the community, such as running a workflow only once. This proposal raises some concerns on how to properly implement it and I would like to know the maintainers/contributors opinion on these 4 topics, but also some other issues that I couldn't think of.

    -

    Resources

    -
      -
    • This discussion was prompted by #10620
    • -
    • A first approach to this problem was discussed in 5659
    • -
    • A draft PR to implement the first approach #5662
    • -
    - - - - -

    Comments

    - - +

    Proposal for Cron Workflows improvements - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    + +
    - - - - - - \ No newline at end of file diff --git a/proposals/makefile-improvement-proposal/index.html b/proposals/makefile-improvement-proposal/index.html index cd51762483bf..071c24dfb1be 100644 --- a/proposals/makefile-improvement-proposal/index.html +++ b/proposals/makefile-improvement-proposal/index.html @@ -1,4035 +1,68 @@ - - - - - - - - - - - - - Proposal for Makefile improvements - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Proposal for Makefile improvements - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Proposal for Makefile improvements

    -

    Introduction

    -

    The motivation for this proposal is to enable developers working on Argo Workflows to use build tools in a more reproducible way. -Currently the Makefile is unfortunately too opinionated and as a result is often a blocker when first setting up Argo Workflows locally. -I believe we should shrink the responsibilities of the Makefile and where possible outsource areas of responsibility to more specialized technology, such -as Devenv/Nix in the case of dependency management.

    -

    Proposal Specifics

    -

    In order to better address reproducibility, it is better to split up the duties the Makefile currently performs into various sub components, that can be assembled in more appropriate technology. One important aspect here is to completely shift the responsibility of dependency management away from the Makefile and into technology such as Nix or Devenv. This proposal will also enable quicker access to a development build of Argo Workflows to developers, reducing the costs of on-boarding and barrier to entry.

    -

    Devenv

    -

    Benefits of Devenv

    -
      -
    • Reproducible build environment
    • -
    • Ability to run processes
    • -
    -

    Disadvantages of Devenv

    -
      -
    • Huge learning curve to tap into Nix functionality
    • -
    • Less documentation
    • -
    -

    Nix

    -

    Benefits of Nix

    -
      -
    • Reproducible build environment
    • -
    • Direct raw control of various Nix related functionality instead of using Devenv
    • -
    • More documentation
    • -
    -

    Disadvantages of Nix

    -
      -
    • Huge learning curve
    • -
    -

    Recommendation

    -

    I suggest that we use Nix over Devenv. I believe that our build environment is unique enough that we will be tapping into Nix anyway, it probably makes sense to directly use Nix in that case.

    -

    Proposal

    -

    In order to maximize the benefit we receive from using something like Nix, I suggest that we initially start off with a modest change to the Makefile. -The first proposal would be to remove out all dependency management code and replace this functionality with Nix, where it is trivially possible. This may not be possible for some go lang related binaries we use, we will retain the Makefile functionality in those cases, at least for a while. Eventually we will migrate more and more of this responsibility away from the Makefile. Following Nix being responsible for all dependency management, we could start to consider moving more of our build system itself into Nix, perhaps it is easiest to start off with UI build as it is relatively painless. However, do note that this is not a requirement, I do not see a problem with the Makefile and the Nix file co-existing, it is more about finding a good balance between the reproducibility we desire and the effort we put into obtaining said reproducibility. An example for a replacement could be this dependency for example, note that we do not state any version here, replacing such installations with Nix based installations will ensure that we can ensure that if a build works on a certain developer's machine, it should also work on every other machine as well.

    -

    What will Nix get us?

    -

    As mentioned previously Nix gets us closer to reproducible build environments. It should ease significantly the on-boarding process of developers onto the project. -There have been several developers who wanted to work on Argo Workflows but found the Makefile to be a barrier, it is likely that there are more developers on this boat. With a reproducible build environment, we hope that -everyone who would like to contribute to the project is able to do so easily. It should also save time for engineers on-boarding onto the project, especially if they are using a system that is not Ubuntu or OSX.

    -

    What will Nix cost us?

    -

    If we proceed further with Nix, it will require some amount of people working on Argo Workflows to learn it, this is not a trivial task by any means. -It will increase the barrier when it comes to changes that are build related, however, this isn't necessarily bad as build related changes should be far less frequent, the friction we will endure here is likely manageable.

    -

    How will developers use nix?

    -

    In the case that both Nix and the Makefile co-exist, we could use nix inside the Makefile itself. The Makefile calls into Nix to setup a developer environment with all dependencies, it will then continue the rest of the Makefile execution as normal. -Following a complete or near complete migration to Nix, we can use nix-build for more of our tasks. An example of a C++ project environment is provided here

    -

    Resources

    - - - - - -

    Comments

    - - +

    Proposal for Makefile improvements - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    + +
    - - - - - - \ No newline at end of file diff --git a/public-api/index.html b/public-api/index.html index f5f3e21803af..a44f27511c80 100644 --- a/public-api/index.html +++ b/public-api/index.html @@ -1,3916 +1,68 @@ - - - - - - - - - - - - - Public API - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Public API - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Public API

    -

    Argo Workflows public API is defined by the following:

    -
      -
    • The file api/openapi-spec/swagger.json
    • -
    • The schema of the table argo_archived_workflows.
    • -
    • The installation options.
    • -
    - - - - -

    Comments

    - - +

    Public API - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/quick-start/index.html b/quick-start/index.html index d631fed2e4d3..c04fc64827cc 100644 --- a/quick-start/index.html +++ b/quick-start/index.html @@ -1,4142 +1,68 @@ - - - - - - - - - - - - - Quick Start - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Quick Start - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - - - - +
    +
    +
    +
    - - - - - - - - -

    Quick Start

    -

    To see how Argo Workflows work, you can install it and run examples of simple workflows.

    -

    Before you start you need a Kubernetes cluster and kubectl set up to be able to access that cluster. For the purposes of getting up and running, a local cluster is fine. You could consider the following local Kubernetes cluster options:

    - -

    Alternatively, if you want to try out Argo Workflows and don't want to set up a Kubernetes cluster, try the Killercoda course.

    -
    -

    Development vs. Production

    -

    These instructions are intended to help you get started quickly. They are not suitable in production. For production installs, please refer to the installation documentation.

    -
    -

    Install Argo Workflows

    -

    To install Argo Workflows, navigate to the releases page and find the release you wish to use (the latest full release is preferred).

    -

    Scroll down to the Controller and Server section and execute the kubectl commands.

    -

    Below is an example of the install commands, ensure that you update the command to install the correct version number:

    -
    kubectl create namespace argo
    -kubectl apply -n argo -f https://github.com/argoproj/argo-workflows/releases/download/v<<ARGO_WORKFLOWS_VERSION>>/install.yaml
    -
    -

    Patch argo-server authentication

    -

    The argo-server (and thus the UI) defaults to client authentication, which requires clients to provide their Kubernetes bearer token in order to authenticate. For more information, refer to the Argo Server Auth Mode documentation. We will switch the authentication mode to server so that we can bypass the UI login for now:

    -
    kubectl patch deployment \
    -  argo-server \
    -  --namespace argo \
    -  --type='json' \
    -  -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/args", "value": [
    -  "server",
    -  "--auth-mode=server"
    -]}]'
    -
    -

    Port-forward the UI

    -

    Open a port-forward so you can access the UI:

    -
    kubectl -n argo port-forward deployment/argo-server 2746:2746
    -
    -

    This will serve the UI on https://localhost:2746. Due to the self-signed certificate, you will receive a TLS error which you will need to manually approve.

    -
    -

    Pay close attention to the URI. It uses https and not http. Navigating to http://localhost:2746 result in server-side error that breaks the port-forwarding.

    -
    -

    Install the Argo Workflows CLI

    -

    You can more easily interact with Argo Workflows with the Argo CLI.

    -

    Submitting an example workflow

    -

    Submit an example workflow (CLI)

    -
    argo submit -n argo --watch https://raw.githubusercontent.com/argoproj/argo-workflows/main/examples/hello-world.yaml
    -
    -

    The --watch flag used above will allow you to observe the workflow as it runs and the status of whether it succeeds. -When the workflow completes, the watch on the workflow will stop.

    -

    You can list all the Workflows you have submitted by running the command below:

    -
    argo list -n argo
    -
    -

    You will notice the Workflow name has a hello-world- prefix followed by random characters. These characters are used -to give Workflows unique names to help identify specific runs of a Workflow. If you submitted this Workflow again, -the next Workflow run would have a different name.

    -

    Using the argo get command, you can always review details of a Workflow run. The output for the command below will -be the same as the information shown as when you submitted the Workflow:

    -
    argo get -n argo @latest
    -
    -

    The @latest argument to the CLI is a short cut to view the latest Workflow run that was executed.

    -

    You can also observe the logs of the Workflow run by running the following:

    -
    argo logs -n argo @latest
    -
    -

    Submit an example workflow (GUI)

    -
      -
    • Open a port-forward so you can access the UI:
    • -
    -
    kubectl -n argo port-forward deployment/argo-server 2746:2746
    -
    -
      -
    • -

      Navigate your browser to https://localhost:2746.

      -
    • -
    • -

      Click + Submit New Workflow and then Edit using full workflow options

      -
    • -
    • -

      You can find an example workflow already in the text field. Press + Create to start the workflow.

      -
    • -
    - - - - -

    Comments

    - - +

    Quick Start - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/releases/index.html b/releases/index.html index 283254a8f84a..7ffb37b772e5 100644 --- a/releases/index.html +++ b/releases/index.html @@ -1,4160 +1,68 @@ - - - - - - - - - - - - - Releases - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Releases - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Releases

    -

    You can find the most recent version under Github release.

    -

    Versioning

    -

    Versions are expressed as x.y.z, where x is the major version, y is the minor version, and z is the patch version, -following Semantic Versioning terminology.

    -

    Argo Workflows does not use Semantic Versioning. Minor versions may contain breaking changes. Patch versions only -contain bug fixes and minor features.

    -

    For stable, use the latest patch version.

    -

    ⚠️ Read the upgrading guide to find out about breaking changes before any upgrade.

    -

    Supported Versions

    -

    We maintain release branches for the most recent two minor releases.

    -

    Fixes may be back-ported to release branches, depending on severity, risk, and, feasibility.

    -

    Breaking changes will be documented in upgrading guide.

    -

    Supported Version Skew

    -

    Both the argo-server and argocli should be the same version as the controller.

    -

    Release Cycle

    -

    New minor versions are released roughly every 6 months.

    -

    Release candidates (RCs) for major and minor releases are typically available for 4-6 weeks before the release becomes generally available (GA). Features may be shipped in subsequent release candidates.

    -

    When features are shipped in a new release candidate, the most recent release candidate will be available for at least 2 weeks to ensure it is tested sufficiently before it is pushed to GA. If bugs are found with a feature and are not resolved within the 2 week period, the features will be rolled back so as to be saved for the next major/minor release timeline, and a new release candidate will be cut for testing before pushing to GA.

    -

    Otherwise, we typically release every two weeks:

    -
      -
    • Patch fixes for the current stable version.
    • -
    • The next release candidate, if we are currently in a release-cycle.
    • -
    -

    Kubernetes Compatibility Matrix

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Argo Workflows \ Kubernetes1.171.181.191.201.211.221.231.241.251.261.27
    3.5xxx?????
    3.4xxx?
    3.3????????
    3.2????????
    3.1????????
    -
      -
    • Fully supported versions.
    • -
    • ? Due to breaking changes might not work. Also, we haven't thoroughly tested against this version.
    • -
    • Unsupported versions.
    • -
    -

    Notes on Compatibility

    -

    Argo versions may be compatible with newer and older versions than what it is listed but only three minor versions are supported per Argo release unless otherwise noted.

    -

    The main branch of Argo Workflows is currently tested on Kubernetes 1.27.

    - - - - -

    Comments

    - - +

    Releases - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/releasing/index.html b/releasing/index.html index 499e1f0bfcf1..783fcba59d26 100644 --- a/releasing/index.html +++ b/releasing/index.html @@ -1,4026 +1,68 @@ - - - - - - - - - - - - - Release Instructions - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Release Instructions - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Release Instructions

    -

    Cherry-Picking Fixes

    -

    ✋ Before you start, make sure you have created a release branch (e.g. release-3.3) and it's passing CI.

    -

    Then get a list of commits you may want to cherry-pick:

    -
    ./hack/cherry-pick.sh release-3.3 "fix" true
    -./hack/cherry-pick.sh release-3.3 "chore(deps)" true
    -./hack/cherry-pick.sh release-3.3 "build" true
    -./hack/cherry-pick.sh release-3.3 "ci" true
    -
    -

    To automatically cherry-pick, run the following:

    -
    ./hack/cherry-pick.sh release-3.3 "fix" false
    -
    -

    Then look for "failed to cherry-pick" in the log to find commits that fail to be cherry-picked and decide if a -manual patch is necessary.

    -

    Ignore:

    -
      -
    • Fixes for features only on main.
    • -
    • Dependency upgrades, unless they fix known security issues.
    • -
    • Build or CI improvements, unless the release pipeline is blocked without them.
    • -
    -

    Cherry-pick the first commit. Run make test locally before pushing. If the build timeouts the build caches may have -gone, try re-running.

    -

    Don't cherry-pick another commit until the CI passes. It is harder to find the cause of a new failed build if the last -build failed too.

    -

    Cherry-picking commits one-by-one and then waiting for the CI will take a long time. Instead, cherry-pick each commit then -run make test locally before pushing.

    -

    Publish Release

    -

    ✋ Before you start, make sure the branch is passing CI.

    -

    Push a new tag to the release branch. E.g.:

    -
    git tag v3.3.4
    -git push upstream v3.3.4 # or origin if you do not use upstream
    -
    -

    GitHub Actions will automatically build and publish your release. This takes about 1h. Set your self a reminder to check -this was successful.

    -

    Update Changelog

    -

    Once the tag is published, GitHub Actions will automatically open a PR to update the changelog. Once the PR is ready, -you can approve it, enable auto-merge, and then run the following to force trigger the CI build:

    -
    git branch -D create-pull-request/changelog
    -git fetch upstream
    -git checkout --track upstream/create-pull-request/changelog
    -git commit -s --allow-empty -m "docs: Force trigger CI"
    -git push upstream create-pull-request/changelog
    -
    - - - - -

    Comments

    - - +

    Release Instructions - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/resource-duration/index.html b/resource-duration/index.html index 445848bd7e34..faf34430d0fe 100644 --- a/resource-duration/index.html +++ b/resource-duration/index.html @@ -1,4082 +1,68 @@ - - - - - - - - - - - - - Resource Duration - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Resource Duration - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Resource Duration

    -
    -

    v2.7 and after

    -
    -

    Argo Workflows provides an indication of how much resource your workflow has used and saves this -information. This is intended to be an indicative but not accurate value.

    -

    Calculation

    -

    The calculation is always an estimate, and is calculated by duration.go -based on container duration, specified pod resource requests, limits, or (for memory and CPU) -defaults.

    -

    Each indicator is divided by a common denominator depending on resource type.

    -

    Base Amounts

    -

    Each resource type has a denominator used to make large values smaller.

    -
      -
    • CPU: 1
    • -
    • Memory: 100Mi
    • -
    • Storage: 10Gi
    • -
    • Ephemeral Storage: 10Gi
    • -
    • All others: 1
    • -
    -

    The requested fraction of the base amount will be multiplied by the container's run time to get -the container's Resource Duration.

    -

    For example, if you've requested 50Mi of memory (half of the base amount), and the container -runs 120sec, then the reported Resource Duration will be 60sec * (100Mi memory).

    -

    Request Defaults

    -

    If requests are not set for a container, Kubernetes defaults to limits. If limits are not set, -Argo falls back to 100m for CPU and 100Mi for memory.

    -

    Note: these are Argo's defaults, not Kubernetes' defaults. For the most meaningful results, -set requests and/or limits for all containers.

    -

    Example

    -

    A pod that runs for 3min, with a CPU limit of 2000m, a memory limit of 1Gi and an nvidia.com/gpu -resource limit of 1:

    -
    CPU:    3min * 2000m / 1000m = 6min * (1 cpu)
    -Memory: 3min * 1Gi / 100Mi   = 30min * (100Mi memory)
    -GPU:    3min * 1     / 1     = 3min * (1 nvidia.com/gpu)
    -
    -

    Web/CLI reporting

    -

    Both the web and CLI give abbreviated usage, like 9m10s*cpu,6s*memory,2m31s*nvidia.com/gpu. In -this context, resources like memory refer to the "base amounts".

    -

    For example, memory means "amount of time a resource requested 100Mi of memory." If a container only -uses 10Mi, each second it runs will only count as a tenth-second of memory.

    -

    Rounding Down

    -

    For a short running pods (<10s), if the memory request is also small (for example, 10Mi), then the memory value may be 0s. This is because the denominator is 100Mi.

    - - - - -

    Comments

    - - +

    Resource Duration - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/resource-template/index.html b/resource-template/index.html index 4ae22ce5c1bb..1f414c0223e7 100644 --- a/resource-template/index.html +++ b/resource-template/index.html @@ -1,3916 +1,68 @@ - - - - - - - - - - - - - Resource Template - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Resource Template - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/rest-api/index.html b/rest-api/index.html index b44a53e33def..51d14bcb171f 100644 --- a/rest-api/index.html +++ b/rest-api/index.html @@ -1,3975 +1,68 @@ - - - - - - - - - - - - - REST API - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + REST API - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    REST API

    -

    Argo Server API

    -
    -

    v2.5 and after

    -
    -

    Argo Workflows ships with a server that provides more features and security than before.

    -

    The server can be configured with or without client auth (server --auth-mode client). When it is disabled, then clients must pass their KUBECONFIG base 64 encoded in the HTTP Authorization header:

    -
    ARGO_TOKEN=$(argo auth token)
    -curl -H "Authorization: $ARGO_TOKEN" https://localhost:2746/api/v1/workflows/argo
    -
    - -

    API reference docs :

    - - - - - -

    Comments

    - - +

    REST API - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/rest-examples/index.html b/rest-examples/index.html index 68ae4d5861ec..eadc3d8c54bf 100644 --- a/rest-examples/index.html +++ b/rest-examples/index.html @@ -1,4064 +1,68 @@ - - - - - - - - - - - - - API Examples - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + API Examples - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - - - - +
    +
    +
    +
    - - - - - - - - -

    API Examples

    -

    Document contains couple of examples of workflow JSON's to submit via argo-server REST API.

    -
    -

    v2.5 and after

    -
    -

    Assuming

    -
      -
    • the namespace of argo-server is argo
    • -
    • authentication is turned off (otherwise provide Authorization header)
    • -
    • argo-server is available on localhost:2746
    • -
    -

    Submitting workflow

    -
    curl --request POST \
    -  --url https://localhost:2746/api/v1/workflows/argo \
    -  --header 'content-type: application/json' \
    -  --data '{
    -  "namespace": "argo",
    -  "serverDryRun": false,
    -  "workflow": {
    -      "metadata": {
    -        "generateName": "hello-world-",
    -        "namespace": "argo",
    -        "labels": {
    -          "workflows.argoproj.io/completed": "false"
    -         }
    -      },
    -     "spec": {
    -       "templates": [
    -        {
    -         "name": "whalesay",
    -         "arguments": {},
    -         "inputs": {},
    -         "outputs": {},
    -         "metadata": {},
    -         "container": {
    -          "name": "",
    -          "image": "docker/whalesay:latest",
    -          "command": [
    -            "cowsay"
    -          ],
    -          "args": [
    -            "hello world"
    -          ],
    -          "resources": {}
    -        }
    -      }
    -    ],
    -    "entrypoint": "whalesay",
    -    "arguments": {}
    -  }
    -}
    -}'
    -
    -

    Getting workflows for namespace argo

    -
    curl --request GET \
    -  --url https://localhost:2746/api/v1/workflows/argo
    -
    -

    Getting single workflow for namespace argo

    -
    curl --request GET \
    -  --url https://localhost:2746/api/v1/workflows/argo/abc-dthgt
    -
    -

    Deleting single workflow for namespace argo

    -
    curl --request DELETE \
    -  --url https://localhost:2746/api/v1/workflows/argo/abc-dthgt
    -
    - - - - -

    Comments

    - - +

    API Examples - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/retries/index.html b/retries/index.html index cdf3346f97f3..5f3108320076 100644 --- a/retries/index.html +++ b/retries/index.html @@ -1,4086 +1,68 @@ - - - - - - - - - - - - - Retries - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Retries - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Retries

    -

    Argo Workflows offers a range of options for retrying failed steps.

    -

    Configuring retryStrategy in WorkflowSpec

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: retry-container-
    -spec:
    -  entrypoint: retry-container
    -  templates:
    -  - name: retry-container
    -    retryStrategy:
    -      limit: "10"
    -    container:
    -      image: python:alpine3.6
    -      command: ["python", -c]
    -      # fail with a 66% probability
    -      args: ["import random; import sys; exit_code = random.choice([0, 1, 1]); sys.exit(exit_code)"]
    -
    -

    The retryPolicy and expression are re-evaluated after each attempt. For example, if you set retryPolicy: OnFailure and your first attempt produces a failure then a retry will be attempted. If the second attempt produces an error, then another attempt will not be made.

    -

    Retry policies

    -

    Use retryPolicy to choose which failure types to retry:

    -
      -
    • Always: Retry all failed steps
    • -
    • OnFailure: Retry steps whose main container is marked as failed in Kubernetes
    • -
    • OnError: Retry steps that encounter Argo controller errors, or whose init or wait containers fail
    • -
    • OnTransientError: Retry steps that encounter errors defined as transient, or errors matching the TRANSIENT_ERROR_PATTERN environment variable. Available in version 3.0 and later.
    • -
    -

    The retryPolicy applies even if you also specify an expression, but in version 3.5 or later the default policy means the expression makes the decision unless you explicitly specify a policy.

    -

    The default retryPolicy is OnFailure, except in version 3.5 or later when an expression is also supplied, when it is Always. This may be easier to understand in this diagram.

    -
    flowchart LR
    -  start([Will a retry be attempted])
    -  start --> policy
    -  policy(Policy Specified?)
    -  policy-->|No|expressionNoPolicy
    -  policy-->|Yes|policyGiven
    -  policyGiven(Expression Specified?)
    -  policyGiven-->|No|policyGivenApplies
    -  policyGiven-->|Yes|policyAndExpression
    -  policyGivenApplies(Supplied Policy)
    -  policyAndExpression(Supplied Policy AND Expression)
    -  expressionNoPolicy(Expression specified?)
    -  expressionNoPolicy-->|No|onfailureNoExpr
    -  expressionNoPolicy-->|Yes|version
    -  onfailureNoExpr[OnFailure]
    -  onfailure[OnFailure AND Expression]
    -  version(Workflows version)
    -  version-->|3.4 or ealier|onfailure
    -  always[Only Expression matters]
    -  version-->|3.5 or later|always
    -
    -

    An example retry strategy:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: retry-on-error-
    -spec:
    -  entrypoint: error-container
    -  templates:
    -  - name: error-container
    -    retryStrategy:
    -      limit: "2"
    -      retryPolicy: "Always"
    -    container:
    -      image: python
    -      command: ["python", "-c"]
    -      # fail with a 80% probability
    -      args: ["import random; import sys; exit_code = random.choice(range(0, 5)); sys.exit(exit_code)"]
    -
    -

    Conditional retries

    -
    -

    v3.2 and after

    -
    -

    You can also use expression to control retries. The expression field -accepts an expr expression and has -access to the following variables:

    -
      -
    • lastRetry.exitCode: The exit code of the last retry, or "-1" if not available
    • -
    • lastRetry.status: The phase of the last retry: Error, Failed
    • -
    • lastRetry.duration: The duration of the last retry, in seconds
    • -
    • lastRetry.message: The message output from the last retry (available from version 3.5)
    • -
    -

    If expression evaluates to false, the step will not be retried.

    -

    The expression result will be logical and with the retryPolicy. Both must be true to retry.

    -

    See example for usage.

    -

    Back-Off

    -

    You can configure the delay between retries with backoff. See example for usage.

    - - - - -

    Comments

    - - +

    Retries - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/roadmap/index.html b/roadmap/index.html index aaf1b68ea2b3..43479f95eac8 100644 --- a/roadmap/index.html +++ b/roadmap/index.html @@ -1,3894 +1,68 @@ - - - - - - - - - - - - - Roadmap - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Roadmap - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/running-at-massive-scale/index.html b/running-at-massive-scale/index.html index 15d0edd4d892..43827092f57c 100644 --- a/running-at-massive-scale/index.html +++ b/running-at-massive-scale/index.html @@ -1,4040 +1,68 @@ - - - - - - - - - - - - - Running At Massive Scale - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Running At Massive Scale - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    - -
    - - +
    +
    +
    +
    - - - - - - - - -

    Running At Massive Scale

    -

    Argo Workflows is an incredibly scalable tool for orchestrating workflows. It empowers you to process thousands of workflows per day, with each workflow consisting of tens of thousands of nodes. Moreover, it effortlessly handles hundreds of thousands of smaller workflows daily. However, optimizing your setup is crucial to fully leverage this capability.

    -

    Run The Latest Version

    -

    You must be running at least v3.1 for several recommendations to work. Upgrade to the very latest patch. Performance -fixes often come in patches.

    -

    Test Your Cluster Before You Install Argo Workflows

    -

    You'll need a big cluster, with a big Kubernetes master.

    -

    Users often encounter problems with Kubernetes needing to be configured for the scale. E.g. Kubernetes API server being -too small. We recommend you test your cluster to make sure it can run the number of pods they need, even before -installing Argo. Create pods at the rate you expect that it'll be created in production. Make sure Kubernetes can keep -up with requests to delete pods at the same rate.

    -

    You'll need to GC data quickly. The less data that Kubernetes and Argo deal with, the less work they need to do. Use -pod GC and workflow GC to achieve this.

    -

    Overwhelmed Kubernetes API

    -

    Where Argo has a lot of work to do, the Kubernetes API can be overwhelmed. There are several strategies to reduce this:

    -
      -
    • Use the Emissary executor (>= v3.1). This does not make any Kubernetes API requests (except for resources template).
    • -
    • Limit the number of concurrent workflows using parallelism.
    • -
    • Rate-limit pod creation configuration (>= v3.1).
    • -
    • Set DEFAULT_REQUEUE_TIME=1m
    • -
    -

    Overwhelmed Database

    -

    If you're running workflows with many nodes, you'll probably be offloading data to a database. Offloaded data is kept -for 5m. You can reduce the number of records created by setting DEFAULT_REQUEUE_TIME=1m. This will slow reconciliation, -but will suit workflows where nodes run for over 1m.

    -

    Miscellaneous

    -

    See also Scaling.

    - - - - -

    Comments

    - - +

    Running At Massive Scale - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/running-locally/index.html b/running-locally/index.html index 1d1ed0738e09..9f07730c1b48 100644 --- a/running-locally/index.html +++ b/running-locally/index.html @@ -1,4283 +1,68 @@ - - - - - - - - - - - - - Running Locally - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Running Locally - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Running Locally

    -

    You have two options:

    -
      -
    1. Use the Dev Container. This takes about 7 minutes. This can be used with VSCode, the devcontainer CLI, or GitHub Codespaces.
    2. -
    3. Install the requirements on your computer manually. This takes about 1 hour.
    4. -
    -

    Development Container

    -

    The development container should be able to do everything you need to do to develop Argo Workflows without installing tools on your local machine. It takes quite a long time to build the container. It runs k3d inside the container so you have a cluster to test against. To communicate with services running either in other development containers or directly on the local machine (e.g. a database), the following URL can be used in the workflow spec: host.docker.internal:<PORT>. This facilitates the implementation of workflows which need to connect to a database or an API server.

    -

    You can use the development container in a few different ways:

    -
      -
    1. Visual Studio Code with Dev Containers extension. Open your argo-workflows folder in VSCode and it should offer to use the development container automatically. VSCode will allow you to forward ports to allow your external browser to access the running components.
    2. -
    3. devcontainer CLI. Once installed, go to your argo-workflows folder and run devcontainer up --workspace-folder . followed by devcontainer exec --workspace-folder . /bin/bash to get a shell where you can build the code. You can use any editor outside the container to edit code; any changes will be mirrored inside the container. Due to a limitation of the CLI, only port 8080 (the Web UI) will be exposed for you to access if you run this way. Other services are usable from the shell inside.
    4. -
    5. GitHub Codespaces. You can start editing as soon as VSCode is open, though you may want to wait for pre-build.sh to finish installing dependencies, building binaries, and setting up the cluster before running any commands in the terminal. Once you start running services (see next steps below), you can click on the "PORTS" tab in the VSCode terminal to see all forwarded ports. You can open the Web UI in a new tab from there.
    6. -
    -

    Once you have entered the container, continue to Developing Locally.

    -

    Note:

    -
      -
    • -

      for Apple Silicon

      -
        -
      • This platform can spend 3 times the indicated time
      • -
      • Configure Docker Desktop to use BuildKit:
      • -
      -
      "features": {
      -  "buildkit": true
      -},
      -
      -
    • -
    • -

      For Windows WSL2

      -
        -
      • Configure .wslconfig to limit memory usage by the WSL2 to prevent VSCode OOM.
      • -
      -
    • -
    • -

      For Linux

      - -
    • -
    -

    Requirements

    -

    Clone the Git repo into: $GOPATH/src/github.com/argoproj/argo-workflows. Any other path will break the code generation.

    -

    Add the following to your /etc/hosts:

    -
    127.0.0.1 dex
    -127.0.0.1 minio
    -127.0.0.1 postgres
    -127.0.0.1 mysql
    -127.0.0.1 azurite
    -
    -

    To build on your own machine without using the Dev Container you will need:

    - -

    We recommend using K3D to set up the local Kubernetes cluster since this will allow you to test RBAC -set-up and is fast. You can set-up K3D to be part of your default kube config as follows:

    -
    k3d cluster start --wait
    -
    -

    Alternatively, you can use Minikube to set up the local Kubernetes cluster. -Once a local Kubernetes cluster has started via minikube start, your kube config will use Minikube's context -automatically.

    -
    -

    Warning

    -

    Do not use Docker Desktop's embedded Kubernetes, it does not support Kubernetes RBAC (i.e. kubectl auth can-i always returns allowed).

    -
    -

    Developing locally

    -

    To start:

    -
      -
    • The controller, so you can run workflows.
    • -
    • MinIO (http://localhost:9000, use admin/password) so you can use artifacts.
    • -
    -

    Run:

    -
    make start
    -
    -

    Make sure you don't see any errors in your terminal. This runs the Workflow Controller locally on your machine (not in Docker/Kubernetes).

    -

    You can submit a workflow for testing using kubectl:

    -
    kubectl create -f examples/hello-world.yaml
    -
    -

    We recommend running make clean before make start to ensure recompilation.

    -

    If you made changes to the executor, you need to build the image:

    -
    make argoexec-image
    -
    -

    To also start the API on http://localhost:2746:

    -
    make start API=true
    -
    -

    This runs the Argo Server (in addition to the Workflow Controller) locally on your machine.

    -

    To also start the UI on http://localhost:8080 (UI=true implies API=true):

    -
    make start UI=true
    -
    -

    diagram

    -

    If you are making change to the CLI (i.e. Argo Server), you can build it separately if you want:

    -
    make cli
    -./dist/argo submit examples/hello-world.yaml ;# new CLI is created as `./dist/argo`
    -
    -

    Although, note that this will be built automatically if you do: make start API=true.

    -

    To test the workflow archive, use PROFILE=mysql or PROFILE=postgres:

    -
    make start PROFILE=mysql
    -
    -

    You'll have, either:

    - -

    To test SSO integration, use PROFILE=sso:

    -
    make start UI=true PROFILE=sso
    -
    -

    Running E2E tests locally

    -

    Start up Argo Workflows using the following:

    -
    make start PROFILE=mysql AUTH_MODE=client STATIC_FILES=false API=true
    -
    -

    If you want to run Azure tests against a local Azurite:

    -
    kubectl -n $KUBE_NAMESPACE apply -f test/e2e/azure/deploy-azurite.yaml
    -make start
    -
    -

    Running One Test

    -

    In most cases, you want to run the test that relates to your changes locally. You should not run all the tests suites. -Our CI will run those concurrently when you create a PR, which will give you feedback much faster.

    -

    Find the test that you want to run in test/e2e

    -
    make TestArtifactServer
    -
    -

    Running A Set Of Tests

    -

    You can find the build tag at the top of the test file.

    -
    //go:build api
    -
    -

    You need to run make test-{buildTag}, so for api that would be:

    -
    make test-api
    -
    -

    Diagnosing Test Failure

    -

    Tests often fail: that's good. To diagnose failure:

    -
      -
    • Run kubectl get pods, are pods in the state you expect?
    • -
    • Run kubectl get wf, is your workflow in the state you expect?
    • -
    • What do the pod logs say? I.e. kubectl logs.
    • -
    • Check the controller and argo-server logs. These are printed to the console you ran make start in. Is anything - logged at level=error?
    • -
    -

    If tests run slowly or time out, factory reset your Kubernetes cluster.

    -

    Committing

    -

    Before you commit code and raise a PR, always run:

    -
    make pre-commit -B
    -
    -

    Please do the following when creating your PR:

    - -

    Examples:

    -
    git commit --signoff -m 'fix: Fixed broken thing. Fixes #1234'
    -
    -
    git commit --signoff -m 'feat: Added a new feature. Fixes #1234'
    -
    -

    Troubleshooting

    -
      -
    • When running make pre-commit -B, if you encounter errors like - make: *** [pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.swagger.json] Error 1, ensure that you - have checked out your code into $GOPATH/src/github.com/argoproj/argo-workflows.
    • -
    • If you encounter "out of heap" issues when building UI through Docker, please validate resources allocated to Docker. - Compilation may fail if allocated RAM is less than 4Gi.
    • -
    • To start profiling with pprof, pass ARGO_PPROF=true when starting the controller locally. - Then run the following:
    • -
    -
    go tool pprof http://localhost:6060/debug/pprof/profile   # 30-second CPU profile
    -go tool pprof http://localhost:6060/debug/pprof/heap      # heap profile
    -go tool pprof http://localhost:6060/debug/pprof/block     # goroutine blocking profile
    -
    -

    Using Multiple Terminals

    -

    I run the controller in one terminal, and the UI in another. I like the UI: it is much faster to debug workflows than -the terminal. This allows you to make changes to the controller and re-start it, without restarting the UI (which I -think takes too long to start-up).

    -

    As a convenience, CTRL=false implies UI=true, so just run:

    -
    make start CTRL=false
    -
    - - - - -

    Comments

    - - +

    Running Locally - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/running-nix/index.html b/running-nix/index.html index 7f9f8e47821e..7954346bc4d1 100644 --- a/running-nix/index.html +++ b/running-nix/index.html @@ -1,4051 +1,68 @@ - - - - - - - - - - - - - Try Argo using Nix - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Try Argo using Nix - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    - -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Try Argo using Nix

    -

    Nix is a package manager / build tool which focuses on reproducible build environments. -Argo Workflows has some basic support for Nix which is enough to get Argo Workflows up and running with minimal effort. -Here are the steps to follow:

    -
      -
    1. Modify your hosts file and set up a Kubernetes cluster according to Running Locally. Don't worry about the other instructions.
    2. -
    3. Install Nix.
    4. -
    5. Run nix develop --extra-experimental-features nix-command --extra-experimental-features flakes ./dev/nix/ --impure (you can add the extra features as a default in your nix.conf file).
    6. -
    7. Run devenv up.
    8. -
    -

    Warning

    -

    This is still bare-bones at the moment, any feature in the Makefile not mentioned here is excluded for now. -In practice, this means that only a make start UI=true equivalent is supported at the moment. -As an additional caveat, there are no LDFlags set in the build; as a result the UI will show 0.0.0-unknown for the version.

    -

    How do I upgrade a dependency?

    -

    Most dependencies are in the Nix packages repository but if you want a specific version, you might have to build it yourself. -This is fairly trivial in Nix, the idea is to just change the version string to whatever package you are concerned about.

    -

    Changing a python dependency version

    -

    If we look at the mkdocs dependency, we see a call to buildPythonPackage, to change the version we need to just modify the version string. -Doing this will display a failure because the hash from the fetchPypi command will now differ, it will also display the correct hash, copy this hash -and replace the existing hash value.

    -

    Changing a go dependency version

    -

    The almost exact same principles apply here, the only difference being you must change the vendorHash and the sha256 fields. -The vendorHash is a hash of the vendored dependencies while the sha256 is for the sources fetched from the fetchFromGithub call.

    -

    Why am I getting a vendorSha256 mismatch ?

    -

    Unfortunately, dependabot is not capable of upgrading flakes automatically, when the go modules are automatically upgraded the -hash of the vendor dependencies changes but this change isn't automatically reflected in the nix file. The vendorSha256 field that needs to -be upgraded can be found by searching for ${package.name} = pkgs.buildGoModule in the nix file.

    - - - - -

    Comments

    - - +

    Try Argo using Nix - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/scaling/index.html b/scaling/index.html index 60522475a813..ad36528b60e3 100644 --- a/scaling/index.html +++ b/scaling/index.html @@ -1,4170 +1,68 @@ - - - - - - - - - - - - - Scaling - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Scaling - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - - - - +
    +
    +
    +
    - - - - - - - - -

    Scaling

    -

    For running large workflows, you'll typically need to scale the controller to match.

    -

    Horizontally Scaling

    -

    You cannot horizontally scale the controller.

    -
    -

    v3.0

    -
    -

    As of v3.0, the controller supports having a hot-standby for High Availability.

    -

    Vertically Scaling

    -

    You can scale the controller vertically in these ways:

    -

    Container Resource Requests

    -

    If you observe the Controller using its total CPU or memory requests, you should increase those.

    -

    Adding Goroutines to Increase Concurrency

    -

    If you have sufficient CPU cores, you can take advantage of them with more goroutines:

    -
      -
    • If you have many Workflows and you notice they're not being reconciled fast enough, increase --workflow-workers.
    • -
    • If you're using TTLStrategy in your Workflows and you notice they're not being deleted fast enough, increase --workflow-ttl-workers.
    • -
    • If you're using PodGC in your Workflows and you notice the Pods aren't being deleted fast enough, increase --pod-cleanup-workers.
    • -
    -
    -

    v3.5 and after

    -
    -
      -
    • If you're using a lot of CronWorkflows and they don't seem to be firing on time, increase --cron-workflow-workers.
    • -
    -

    K8S API Client Side Rate Limiting

    -

    The K8S client library rate limits the messages that can go out.

    -

    If you frequently see messages similar to this in the Controller log (issued by the library):

    -
    Waited for 7.090296384s due to client-side throttling, not priority and fairness, request: GET:https://10.100.0.1:443/apis/argoproj.io/v1alpha1/namespaces/argo/workflowtemplates/s2t
    -
    -

    Or, in >= v3.5, if you see warnings similar to this (could be any CR, not just WorkflowTemplate):

    -
    Waited for 7.090296384s, request:GET:https://10.100.0.1:443/apis/argoproj.io/v1alpha1/namespaces/argo/workflowtemplates/s2t
    -
    -

    Then, if your K8S API Server can handle more requests:

    -
      -
    • Increase both --qps and --burst arguments for the Controller. The qps value indicates the average number of queries per second allowed by the K8S Client. The burst value is the number of queries/sec the Client receives before it starts enforcing qps, so typically burst > qps. If not set, the default values are qps=20 and burst=30 (as of v3.5 (refer to cmd/workflow-controller/main.go in case the values change)).
    • -
    -

    Sharding

    -

    One Install Per Namespace

    -

    Rather than running a single installation in your cluster, run one per namespace using the --namespaced flag.

    -

    Instance ID

    -

    Within a cluster can use instance ID to run N Argo instances within a cluster.

    -

    Create one namespace for each Argo, e.g. argo-i1, argo-i2:.

    -

    Edit workflow-controller-configmap.yaml for each namespace to set an instance ID.

    -
    apiVersion: v1
    -kind: ConfigMap
    -metadata:
    -  name: workflow-controller-configmap
    -data:
    -    instanceID: i1
    -
    -
    -

    v2.9 and after

    -
    -

    You may need to pass the instance ID to the CLI:

    -
    argo --instanceid i1 submit my-wf.yaml
    -
    -

    You do not need to have one instance ID per namespace, you could have many or few.

    -

    Maximum Recursion Depth

    -

    In order to protect users against infinite recursion, the controller has a default maximum recursion depth of 100 calls to templates.

    -

    This protection can be disabled with the environment variable DISABLE_MAX_RECURSION=true

    -

    Miscellaneous

    -

    See also Running At Massive Scale.

    - - - - -

    Comments

    - - +

    Scaling - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/security/index.html b/security/index.html index 20b32c98da7d..7ca54f103505 100644 --- a/security/index.html +++ b/security/index.html @@ -1,4230 +1,68 @@ - - - - - - - - - - - - - Security - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Security - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Security

    -

    To report security issues.

    -

    💡 Read Practical Argo Workflows Hardening.

    -

    Workflow Controller Security

    -

    This has three parts.

    -

    Controller Permissions

    -

    The controller has permission (via Kubernetes RBAC + its config map) with either all namespaces (cluster-scope install) or a single managed namespace (namespace-install), notably:

    -
      -
    • List/get/update workflows, and cron-workflows.
    • -
    • Create/get/delete pods, PVCs, and PDBs.
    • -
    • List/get template, config maps, service accounts, and secrets.
    • -
    -

    See workflow-controller-cluster-role.yaml or workflow-controller-role.yaml

    -

    User Permissions

    -

    Users minimally need permission to create/read workflows. The controller will then create workflow pods (config maps etc) on behalf of the users, even if the user does not have permission to do this themselves. The controller will only create workflow pods in the workflow's namespace.

    -

    A way to think of this is that, if the user has permission to create a workflow in a namespace, then it is OK to create pods or anything else for them in that namespace.

    -

    If the user only has permission to create workflows, then they will be typically unable to configure other necessary resources such as config maps, or view the outcome of their workflow. This is useful when the user is a service.

    -
    -

    Warning

    -

    If you allow users to create workflows in the controller's namespace (typically argo), it may be possible for users to modify the controller itself. In a namespace-install the managed namespace should therefore not be the controller's namespace.

    -
    -

    You can typically further restrict what a user can do to just being able to submit workflows from templates using the workflow restrictions feature.

    -

    UI Access

    -

    If you want a user to have read-only access to the entirety of the Argo UI for their namespace, a sample role for them may look like:

    -
    apiVersion: rbac.authorization.k8s.io/v1
    -kind: Role
    -metadata:
    -  name: ui-user-read-only
    -rules:
    -  # k8s standard APIs
    -  - apiGroups:
    -      - ""
    -    resources:
    -      - events
    -      - pods
    -      - pods/log
    -    verbs:
    -      - get
    -      - list
    -      - watch
    -  # Argo APIs. See also https://github.com/argoproj/argo-workflows/blob/main/manifests/cluster-install/workflow-controller-rbac/workflow-aggregate-roles.yaml#L4
    -  - apiGroups:
    -      - argoproj.io
    -    resources:
    -      - eventsources
    -      - sensors
    -      - workflows
    -      - workfloweventbindings
    -      - workflowtemplates
    -      - clusterworkflowtemplates
    -      - cronworkflows
    -      - cronworkflows
    -      - workflowtaskresults
    -    verbs:
    -      - get
    -      - list
    -      - watch
    -
    -

    Workflow Pod Permissions

    -

    Workflow pods run using either:

    -
      -
    • The default service account.
    • -
    • The service account declared in the workflow spec.
    • -
    -

    There is no restriction on which service account in a namespace may be used.

    -

    This service account typically needs permissions.

    -

    Different service accounts should be used if a workflow pod needs to have elevated permissions, e.g. to create other resources.

    -

    The main container will have the service account token mounted, allowing the main container to patch pods (among other permissions). Set automountServiceAccountToken to false to prevent this. See fields.

    -

    By default, workflows pods run as root. To further secure workflow pods, set the workflow pod security context.

    -

    You should configure the controller with the correct workflow executor for your trade off between security and scalability.

    -

    These settings can be set by default using workflow defaults.

    -

    Argo Server Security

    -

    Argo Server implements security in three layers.

    -

    Firstly, you should enable transport layer security to ensure your data cannot be read in transit.

    -

    Secondly, you should enable an authentication mode to ensure that you do not run workflows from unknown users.

    -

    Finally, you should configure the argo-server role and role binding with the correct permissions.

    -

    Read-Only

    -

    You can achieve this by configuring the argo-server role (example with only read access (i.e. only get/list/watch verbs)).

    -

    Network Security

    -

    Argo Workflows requires various levels of network access depending on configuration and the features enabled. The following describes the different workflow components and their network access needs, to help provide guidance on how to configure the argo namespace in a secure manner (e.g. NetworkPolicy).

    -

    Argo Server

    -

    The Argo Server is commonly exposed to end-users to provide users with a UI for visualizing and managing their workflows. It must also be exposed if leveraging webhooks to trigger workflows. Both of these use cases require that the argo-server Service to be exposed for ingress traffic (e.g. with an Ingress object or load balancer). Note that the Argo UI is also available to be accessed by running the server locally (i.e. argo server) using local KUBECONFIG credentials, and visiting the UI over https://localhost:2746.

    -

    The Argo Server additionally has a feature to allow downloading of artifacts through the UI. This feature requires that the argo-server be given egress access to the underlying artifact provider (e.g. S3, GCS, MinIO, Artifactory, Azure Blob Storage) in order to download and stream the artifact.

    -

    Workflow Controller

    -

    The workflow-controller Deployment exposes a Prometheus metrics endpoint (workflow-controller-metrics:9090) so that a Prometheus server can periodically scrape for controller level metrics. Since Prometheus is typically running in a separate namespace, the argo namespace should be configured to allow cross-namespace ingress access to the workflow-controller-metrics Service.

    -

    Database access

    -

    A persistent store can be configured for either archiving or offloading workflows. If either of these features are enabled, both the workflow-controller and argo-server Deployments will need egress network access to the external database used for archiving/offloading.

    - - - - -

    Comments

    - - +

    Security - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/service-accounts/index.html b/service-accounts/index.html index 7e81230ca546..84c6a296b48a 100644 --- a/service-accounts/index.html +++ b/service-accounts/index.html @@ -1,4014 +1,68 @@ - - - - - - - - - - - - - Service Accounts - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Service Accounts - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Service Accounts

    -

    Configure the service account to run Workflows

    -

    Roles, Role-Bindings, and Service Accounts

    -

    In order for Argo to support features such as artifacts, outputs, access to secrets, etc. it needs to communicate with Kubernetes resources -using the Kubernetes API. To communicate with the Kubernetes API, Argo uses a ServiceAccount to authenticate itself to the Kubernetes API. -You can specify which Role (i.e. which permissions) the ServiceAccount that Argo uses by binding a Role to a ServiceAccount using a RoleBinding

    -

    Then, when submitting Workflows you can specify which ServiceAccount Argo uses using:

    -
    argo submit --serviceaccount <name>
    -
    -

    When no ServiceAccount is provided, Argo will use the default ServiceAccount from the namespace from which it is run, which will almost always have insufficient privileges by default.

    -

    For more information about granting Argo the necessary permissions for your use case see Workflow RBAC.

    -

    Granting admin privileges

    -

    For the purposes of this demo, we will grant the default ServiceAccount admin privileges (i.e., we will bind the admin Role to the default ServiceAccount of the current namespace):

    -
    kubectl create rolebinding default-admin --clusterrole=admin --serviceaccount=argo:default -n argo
    -
    -

    Note that this will grant admin privileges to the default ServiceAccount in the namespace that the command is run from, so you will only be able to -run Workflows in the namespace where the RoleBinding was made.

    - - - - -

    Comments

    - - +

    Service Accounts - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/sidecar-injection/index.html b/sidecar-injection/index.html index a91bb20a15a7..85b32d28e0f9 100644 --- a/sidecar-injection/index.html +++ b/sidecar-injection/index.html @@ -1,4053 +1,68 @@ - - - - - - - - - - - - - Sidecar Injection - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Sidecar Injection - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Sidecar Injection

    -

    Automatic (i.e. mutating webhook based) sidecar injection systems, including service meshes such as Anthos and Istio -Proxy, create a unique problem for Kubernetes workloads that run to completion.

    -

    Because sidecars are injected outside of the view of the workflow controller, the controller has no awareness of them. -It has no opportunity to rewrite the containers command (when using the Emissary Executor) and as the sidecar's process -will run as PID 1, which is protected. It can be impossible for the wait container to terminate the sidecar.

    -

    You will minimize problems by not using Istio with Argo Workflows.

    -

    See #1282.

    -

    Support Matrix

    -

    Key:

    -
      -
    • Unsupported - this executor is no longer supported
    • -
    • Any - we can kill any image
    • -
    • KubectlExec - we kill images by running kubectl exec
    • -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ExecutorSidecarInjected Sidecar
    dockerAnyUnsupported
    emissaryAnyKubectlExec
    k8sapiShellKubectlExec
    kubeletShellKubectlExec
    pnsAnyAny
    -

    How We Kill Sidecars Using kubectl exec

    -
    -

    v3.1 and after

    -
    -

    Kubernetes does not provide a way to kill a single container. You can delete a pod, but this kills all containers, and loses all information -and logs of that pod.

    -

    Instead, try to mimic the Kubernetes termination behavior, which is:

    -
      -
    1. SIGTERM PID 1
    2. -
    3. Wait for the pod's terminateGracePeriodSeconds (30s by default).
    4. -
    5. SIGKILL PID 1
    6. -
    -

    The following are not supported:

    -
      -
    • preStop
    • -
    • STOPSIGNAL
    • -
    -

    To do this, it must be possible to run a kubectl exec command that kills the injected sidecar. By default it runs /bin/sh -c 'kill 1'. This can fail:

    -
      -
    1. No /bin/sh.
    2. -
    3. Process is not running as PID 1 (which is becoming the default these days due to runAsNonRoot).
    4. -
    5. Process does not correctly respond to kill 1 (e.g. some shell script weirdness).
    6. -
    -

    You can override the kill command by using a pod annotation (where %d is the signal number), for example:

    -
    spec:
    -  podMetadata:
    -    annotations:
    -      workflows.argoproj.io/kill-cmd-istio-proxy: '["pilot-agent", "request", "POST", "quitquitquit"]'
    -      workflows.argoproj.io/kill-cmd-vault-agent: '["sh", "-c", "kill -%d 1"]'
    -      workflows.argoproj.io/kill-cmd-sidecar: '["sh", "-c", "kill -%d $(pidof entrypoint.sh)"]'
    -
    - - - - -

    Comments

    - - +

    Sidecar Injection - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/static-code-analysis/index.html b/static-code-analysis/index.html index cf4be3dfe3f9..a913e3db73b7 100644 --- a/static-code-analysis/index.html +++ b/static-code-analysis/index.html @@ -1,3916 +1,68 @@ - - - - - - - - - - - - - Static Code Analysis - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Static Code Analysis - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Static Code Analysis

    -

    We use the following static code analysis tools:

    -
      -
    • golangci-lint and eslint for compile time linting.
    • -
    • Snyk for dependency and image scanning (SCA).
    • -
    -

    These are at least run daily or on each pull request.

    - - - - -

    Comments

    - - +

    Static Code Analysis - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/stress-testing/index.html b/stress-testing/index.html index e5e5672f8a78..a2bd308473d6 100644 --- a/stress-testing/index.html +++ b/stress-testing/index.html @@ -1,4003 +1,68 @@ - - - - - - - - - - - - - Stress Testing - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Stress Testing - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Stress Testing

    -

    Install gcloud binary.

    -
    # Login to GCP:
    -gloud auth login
    -
    -# Set-up your config (if needed):
    -gcloud config set project alex-sb
    -
    -# Create a cluster (default region is us-west-2, if you're not in west of the USA, you might want at different region):
    -gcloud container clusters create-auto argo-workflows-stress-1
    -
    -# Get credentials:
    -gcloud container clusters get-credentials argo-workflows-stress-1                             
    -
    -# Install workflows (If this fails, try running it again):
    -make start PROFILE=stress
    -
    -# Make sure pods are running:
    -kubectl get deployments
    -
    -# Run a test workflow:
    -argo submit examples/hello-world.yaml --watch
    -
    -

    Checks

    - -

    Run go run ./test/stress/tool -n 10000 to run a large number of workflows.

    -

    Check Prometheus:

    -
      -
    1. See how many Kubernetes API requests are being made. You will see about one Update workflows - per reconciliation, multiple Create pods. You should expect to see one Get workflowtemplates per workflow (done - on first reconciliation). Otherwise, if you see anything else, that might be a problem.
    2. -
    3. How many errors were logged? log_messages{level="error"} What was the cause?
    4. -
    -

    Check PProf to see if there any any hot spots:

    -
    go tool pprof -png http://localhost:6060/debug/pprof/allocs
    -go tool pprof -png http://localhost:6060/debug/pprof/heap
    -go tool pprof -png http://localhost:6060/debug/pprof/profile
    -
    -

    Clean-up

    -
    gcloud container clusters delete argo-workflows-stress-1
    -
    - - - - -

    Comments

    - - +

    Stress Testing - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/survey-data-privacy/index.html b/survey-data-privacy/index.html index 3bf7623ac430..752f3c731dfa 100644 --- a/survey-data-privacy/index.html +++ b/survey-data-privacy/index.html @@ -1,3911 +1,68 @@ - - - - - - - - - - - - - Survey Data Privacy - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Survey Data Privacy - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/suspend-template/index.html b/suspend-template/index.html index 9372338dba2f..ad6f3ff59578 100644 --- a/suspend-template/index.html +++ b/suspend-template/index.html @@ -1,3916 +1,68 @@ - - - - - - - - - - - - - Suspend Template - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Suspend Template - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/swagger/index.html b/swagger/index.html index 4104897ba015..5a1c762ed15d 100644 --- a/swagger/index.html +++ b/swagger/index.html @@ -1,3937 +1,68 @@ - - - - - - - - - - - - - API Reference - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + SwaggerUI + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    API Reference

    - - - - - - - SwaggerUI - - - -
    - - - - - - - - -

    Comments

    - - +

    SwaggerUI

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/synchronization/index.html b/synchronization/index.html index a024e034bcdf..ca8e2399e5c3 100644 --- a/synchronization/index.html +++ b/synchronization/index.html @@ -1,4142 +1,68 @@ - - - - - - - - - - - - - Synchronization - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Synchronization - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Synchronization

    -
    -

    v2.10 and after

    -
    -

    Introduction

    -

    Synchronization enables users to limit the parallel execution of certain workflows or -templates within a workflow without having to restrict others.

    -

    Users can create multiple synchronization configurations in the ConfigMap that can be referred to -from a workflow or template within a workflow. Alternatively, users can -configure a mutex to prevent concurrent execution of templates or -workflows using the same mutex.

    -

    For example:

    -
    apiVersion: v1
    -kind: ConfigMap
    -metadata:
    - name: my-config
    -data:
    -  workflow: "1"  # Only one workflow can run at given time in particular namespace
    -  template: "2"  # Two instances of template can run at a given time in particular namespace
    -
    -

    Workflow-level Synchronization

    -

    Workflow-level synchronization limits parallel execution of the workflow if workflows have the same synchronization reference. -In this example, Workflow refers to workflow synchronization key which is configured as limit 1, -so only one workflow instance will be executed at given time even multiple workflows created.

    -

    Using a semaphore configured by a ConfigMap:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: synchronization-wf-level-
    -spec:
    -  entrypoint: whalesay
    -  synchronization:
    -    semaphore:
    -      configMapKeyRef:
    -        name: my-config
    -        key: workflow
    -  templates:
    -  - name: whalesay
    -    container:
    -      image: docker/whalesay:latest
    -      command: [cowsay]
    -      args: ["hello world"]
    -
    -

    Using a mutex:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: synchronization-wf-level-
    -spec:
    -  entrypoint: whalesay
    -  synchronization:
    -    mutex:
    -      name: workflow
    -  templates:
    -  - name: whalesay
    -    container:
    -      image: docker/whalesay:latest
    -      command: [cowsay]
    -      args: ["hello world"]
    -
    -

    Template-level Synchronization

    -

    Template-level synchronization limits parallel execution of the template across workflows, if templates have the same synchronization reference. -In this example, acquire-lock template has synchronization reference of template key which is configured as limit 2, -so two instances of templates will be executed at a given time: even multiple steps/tasks within workflow or different workflows referring to the same template.

    -

    Using a semaphore configured by a ConfigMap:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: synchronization-tmpl-level-
    -spec:
    -  entrypoint: synchronization-tmpl-level-example
    -  templates:
    -  - name: synchronization-tmpl-level-example
    -    steps:
    -    - - name: synchronization-acquire-lock
    -        template: acquire-lock
    -        arguments:
    -          parameters:
    -          - name: seconds
    -            value: "{{item}}"
    -        withParam: '["1","2","3","4","5"]'
    -
    -  - name: acquire-lock
    -    synchronization:
    -      semaphore:
    -        configMapKeyRef:
    -          name: my-config
    -          key: template
    -    container:
    -      image: alpine:latest
    -      command: [sh, -c]
    -      args: ["sleep 10; echo acquired lock"]
    -
    -

    Using a mutex:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: synchronization-tmpl-level-
    -spec:
    -  entrypoint: synchronization-tmpl-level-example
    -  templates:
    -  - name: synchronization-tmpl-level-example
    -    steps:
    -    - - name: synchronization-acquire-lock
    -        template: acquire-lock
    -        arguments:
    -          parameters:
    -          - name: seconds
    -            value: "{{item}}"
    -        withParam: '["1","2","3","4","5"]'
    -
    -  - name: acquire-lock
    -    synchronization:
    -      mutex:
    -        name: template
    -    container:
    -      image: alpine:latest
    -      command: [sh, -c]
    -      args: ["sleep 10; echo acquired lock"]
    -
    -

    Examples:

    -
      -
    1. Workflow level semaphore
    2. -
    3. Workflow level mutex
    4. -
    5. Step level semaphore
    6. -
    7. Step level mutex
    8. -
    -

    Other Parallelism support

    -

    In addition to this synchronization, the workflow controller supports a parallelism setting that applies to all workflows -in the system (it is not granular to a class of workflows, or tasks withing them). Furthermore, there is a parallelism setting -at the workflow and template level, but this only restricts total concurrent executions of tasks within the same workflow.

    - - - - -

    Comments

    - - +

    Synchronization - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/template-defaults/index.html b/template-defaults/index.html index 1abd80dc8b8c..a6ae67585c22 100644 --- a/template-defaults/index.html +++ b/template-defaults/index.html @@ -1,4030 +1,68 @@ - - - - - - - - - - - - - Template Defaults - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Template Defaults - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Template Defaults

    -
    -

    v3.1 and after

    -
    -

    Introduction

    -

    TemplateDefaults feature enables the user to configure the default template values in workflow spec level that will apply to all the templates in the workflow. If the template has a value that also has a default value in templateDefault, the Template's value will take precedence. These values will be applied during the runtime. Template values and default values are merged using Kubernetes strategic merge patch. To check whether and how list values are merged, inspect the patchStrategy and patchMergeKey tags in the workflow definition.

    -

    Configuring templateDefaults in WorkflowSpec

    -

    For example:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  name: template-defaults-example
    -spec:
    -  entrypoint: main
    -  templateDefaults:
    -    timeout: 30s   # timeout value will be applied to all templates
    -    retryStrategy: # retryStrategy value will be applied to all templates
    -      limit: 2
    -  templates:
    -  - name: main
    -    container:
    -      image: docker/whalesay:latest
    -
    -

    template defaults example

    -

    Configuring templateDefaults in Controller Level

    -

    Operator can configure the templateDefaults in workflow defaults. This templateDefault will be applied to all the workflow which runs on the controller.

    -

    The following would be specified in the Config Map:

    -
    apiVersion: v1
    -kind: ConfigMap
    -metadata:
    -  name: workflow-controller-configmap
    -data:
    -  # Default values that will apply to all Workflows from this controller, unless overridden on the Workflow-level
    -  workflowDefaults: |
    -    metadata:
    -      annotations:
    -        argo: workflows
    -      labels:
    -        foo: bar
    -    spec:
    -      ttlStrategy:
    -        secondsAfterSuccess: 5
    -      templateDefaults:
    -        timeout: 30s
    -
    - - - - -

    Comments

    - - +

    Template Defaults - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/tls/index.html b/tls/index.html index 42e4b145ae8a..e79915c7e97d 100644 --- a/tls/index.html +++ b/tls/index.html @@ -1,4112 +1,68 @@ - - - - - - - - - - - - - Transport Layer Security - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Transport Layer Security - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Transport Layer Security

    -
    -

    v2.8 and after

    -
    -

    If you're running Argo Server you have three options with increasing transport security (note - you should also be -running authentication):

    -

    Default configuration

    -
    -

    v2.8 - 2.12

    -
    -

    Defaults to Plain Text

    -
    -

    v3.0 and after

    -
    -

    Defaults to Encrypted if cert is available

    -

    Argo image/deployment defaults to Encrypted with a self-signed certificate which expires after 365 days.

    -

    Plain Text

    -

    Recommended for: development.

    -

    Everything is sent in plain text.

    -

    Start Argo Server with the --secure=false (or ARGO_SECURE=false) flag, e.g.:

    -
    export ARGO_SECURE=false
    -argo server --secure=false
    -
    -

    To secure the UI you may front it with a HTTPS proxy.

    -

    Encrypted

    -

    Recommended for: development and test environments.

    -

    You can encrypt connections without any real effort.

    -

    Start Argo Server with the --secure flag, e.g.:

    -
    argo server --secure
    -
    -

    It will start with a self-signed certificate that expires after 365 days.

    -

    Run the CLI with --secure (or ARGO_SECURE=true) and --insecure-skip-verify (or ARGO_INSECURE_SKIP_VERIFY=true).

    -
    argo --secure --insecure-skip-verify list
    -
    -
    export ARGO_SECURE=true
    -export ARGO_INSECURE_SKIP_VERIFY=true
    -argo --secure --insecure-skip-verify list
    -
    -

    Tip: Don't forget to update your readiness probe to use HTTPS. To do so, edit your argo-server -Deployment's readinessProbe spec:

    -
    readinessProbe:
    -    httpGet: 
    -        scheme: HTTPS
    -
    -

    Encrypted and Verified

    -

    Recommended for: production environments.

    -

    Run your HTTPS proxy in front of the Argo Server. You'll need to set-up your certificates (this is out of scope of this -documentation).

    -

    Start Argo Server with the --secure flag, e.g.:

    -
    argo server --secure
    -
    -

    As before, it will start with a self-signed certificate that expires after 365 days.

    -

    Run the CLI with --secure (or ARGO_SECURE=true) only.

    -
    argo --secure list
    -
    -
    export ARGO_SECURE=true
    -argo list
    -
    -

    TLS Min Version

    -

    Set TLS_MIN_VERSION to be the minimum TLS version to use. This is v1.2 by default.

    -

    This must be one of these int values.

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    VersionValue
    v1.0769
    v1.1770
    v1.2771
    v1.3772
    - - - - -

    Comments

    - - +

    Transport Layer Security - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/tolerating-pod-deletion/index.html b/tolerating-pod-deletion/index.html index 49faea9878ea..5d7239a092da 100644 --- a/tolerating-pod-deletion/index.html +++ b/tolerating-pod-deletion/index.html @@ -1,3987 +1,68 @@ - - - - - - - - - - - - - Tolerating Pod Deletion - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Tolerating Pod Deletion - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Tolerating Pod Deletion

    -
    -

    v2.12 and after

    -
    -

    In Kubernetes, pods are cattle and can be deleted at any time. Deletion could be manually via kubectl delete pod, during a node drain, or for other reasons.

    -

    This can be very inconvenient, your workflow will error, but for reasons outside of your control.

    -

    A pod disruption budget can reduce the likelihood of this happening. But, it cannot entirely prevent it.

    -

    To retry pods that were deleted, set retryStrategy.retryPolicy: OnError.

    -

    This can be set at a workflow-level, template-level, or globally (using workflow defaults)

    -

    Example

    -

    Run the following workflow (which will sleep for 30s):

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  name: example
    -spec:
    -  retryStrategy:
    -   retryPolicy: OnError
    -   limit: 1
    -  entrypoint: main
    -  templates:
    -    - name: main
    -      container:
    -        image: docker/whalesay:latest
    -        command:
    -          - sleep
    -          - 30s
    -
    -

    Then execute kubectl delete pod example. You'll see that the errored node is automatically retried.

    -

    💡 Read more on architecting workflows for reliability.

    - - - - -

    Comments

    - - +

    Tolerating Pod Deletion - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/training/index.html b/training/index.html index 65ec0e55c035..ccc5e9a9e54f 100644 --- a/training/index.html +++ b/training/index.html @@ -1,3991 +1,68 @@ - - - - - - - - - - - - - Training - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Training - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Training

    -

    Videos

    -

    We also have a YouTube playlist of videos that includes workshops you can follow along with:

    -

    Videos Screenshot Open the playlist

    -

    Hands-On

    -

    We've created a Killercoda course featuring beginner and intermediate lessons. These allow to you try out Argo Workflows in your web browser without needing to install anything on your computer. Each lesson starts up a Kubernetes cluster that you can access via a web browser.

    -

    Additional resources

    -

    Visit the awesome-argo GitHub repo for more educational resources.

    - - - - -

    Comments

    - - +

    Training - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/upgrading/index.html b/upgrading/index.html index f9e6327c82b5..9e6af78e7ae1 100644 --- a/upgrading/index.html +++ b/upgrading/index.html @@ -1,4677 +1,68 @@ - - - - - - - - - - - - - Upgrading Guide - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Upgrading Guide - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Upgrading Guide

    -

    Breaking changes typically (sometimes we don't realise they are breaking) have "!" in the commit message, as per -the conventional commits.

    -

    Upgrading to v3.5

    -

    There are no known breaking changes in this release. Please file an issue if you encounter any unexpected problems after upgrading.

    -

    Upgrading to v3.4

    -

    Non-Emissary executors are removed. (#7829)

    -

    Emissary executor is now the only supported executor. If you are using other executors, e.g. docker, k8sapi, pns, and kubelet, you need to -remove your containerRuntimeExecutors and containerRuntimeExecutor from your controller's configmap. If you have workflows that use different -executors with the label workflows.argoproj.io/container-runtime-executor, this is no longer supported and will not be effective.

    -

    chore!: Remove dataflow pipelines from codebase. (#9071)

    -

    You are affected if you are using dataflow pipelines in the UI or via the /pipelines endpoint. -We no longer support dataflow pipelines and all relevant code has been removed.

    -

    feat!: Add entrypoint lookup. Fixes #8344

    -

    Affected if:

    -
      -
    • Using the Emissary executor.
    • -
    • Used the args field for any entry in images.
    • -
    -

    This PR automatically looks up the command and entrypoint. The implementation for config look-up was incorrect (it -allowed you to specify args but not entrypoint). args has been removed to correct the behaviour.

    -

    If you are incorrectly configured, the workflow controller will error on start-up.

    -

    Actions

    -

    You don't need to configure images that use v2 manifests anymore. You can just remove them (e.g. argoproj/argosay:v2):

    -
    % docker manifest inspect argoproj/argosay:v2
    -...
    -"schemaVersion": 2,
    -...
    -
    -

    For v1 manifests (e.g. docker/whalesay:latest):

    -
    % docker image inspect -f '{{.Config.Entrypoint}} {{.Config.Cmd}}' docker/whalesay:latest
    -[] [/bin/bash]
    -
    -
    images:
    -  docker/whalesay:latest:
    -    cmd: [/bin/bash]
    -
    -

    feat: Fail on invalid config. (#8295)

    -

    The workflow controller will error on start-up if incorrectly configured, rather than silently ignoring -mis-configuration.

    -
    Failed to register watch for controller config map: error unmarshaling JSON: while decoding JSON: json: unknown field \"args\"
    -
    -

    feat: add indexes for improve archived workflow performance. (#8860)

    -

    This PR adds indexes to archived workflow tables. This change may cause a long time to upgrade if the user has a large table.

    -

    feat: enhance artifact visualization (#8655)

    -

    For AWS users using S3: visualizing artifacts in the UI and downloading them now requires an additional "Action" to be configured in your S3 bucket policy: "ListBucket".

    -

    Upgrading to v3.3

    -

    662a7295b feat: Replace patch pod with create workflowtaskresult. Fixes #3961 (#8000)

    -

    The PR changes the permissions that can be used by a workflow to remove the pod patch permission.

    -

    See workflow RBAC and #8013.

    -

    06d4bf76f fix: Reduce agent permissions. Fixes #7986 (#7987)

    -

    The PR changes the permissions used by the agent to report back the outcome of HTTP template requests. The permission patch workflowtasksets/status replaces patch workflowtasksets, for example:

    -
    apiVersion: rbac.authorization.k8s.io/v1
    -kind: Role
    -metadata:
    -  name: agent
    -rules:
    -  - apiGroups:
    -      - argoproj.io
    -    resources:
    -      - workflowtasksets/status
    -    verbs:
    -      - patch
    -
    -

    Workflows running during any upgrade should be give both permissions.

    -

    See #8013.

    -

    feat!: Remove deprecated config flags

    -

    This PR removes the following configmap items -

    -
      -
    • executorImage (use executor.image in configmap instead) - e.g. - Workflow controller configmap similar to the following one given below won't be valid anymore:
    • -
    -
    apiVersion: v1
    -kind: ConfigMap
    -metadata:
    -  name: workflow-controller-configmap
    -data:
    -  ...
    -  executorImage: argoproj/argocli:latest
    -  ...
    -
    -

    From now and onwards, only provide the executor image in workflow controller as a command argument as shown below:

    -
    apiVersion: v1
    -kind: ConfigMap
    -metadata:
    -  name: workflow-controller-configmap
    -data:
    -  ...
    -  executor: |
    -    image: argoproj/argocli:latest
    -  ...
    -
    -
      -
    • executorImagePullPolicy (use executor.imagePullPolicy in configmap instead) - e.g. - Workflow controller configmap similar to the following one given below won't be valid anymore:
    • -
    -
    data:
    -  ...
    -  executorImagePullPolicy: IfNotPresent
    -  ...
    -
    -

    Change it as shown below:

    -
    data:
    -  ...
    -  executor: |
    -    imagePullPolicy: IfNotPresent
    -  ...
    -
    -
      -
    • executorResources (use executor.resources in configmap instead) - e.g. - Workflow controller configmap similar to the following one given below won't be valid anymore:
    • -
    -
    data:
    -  ...
    -  executorResources:
    -    requests:
    -      cpu: 0.1
    -      memory: 64Mi
    -    limits:
    -      cpu: 0.5
    -      memory: 512Mi
    -  ...
    -
    -

    Change it as shown below:

    -
    data:
    -  ...
    -  executor: |
    -    resources:
    -      requests:
    -        cpu: 0.1
    -        memory: 64Mi
    -      limits:
    -        cpu: 0.5
    -        memory: 512Mi
    -  ...
    -
    -

    fce82d572 feat: Remove pod workers (#7837)

    -

    This PR removes pod workers from the code, the pod informer directly writes into the workflow queue. As a result the --pod-workers flag has been removed.

    -

    93c11a24ff feat: Add TLS to Metrics and Telemetry servers (#7041)

    -

    This PR adds the ability to send metrics over TLS with a self-signed certificate. In v3.5 this will be enabled by default, so it is recommended that users enable this functionality now.

    -

    0758eab11 feat(server)!: Sync dispatch of webhook events by default

    -

    This is not expected to impact users.

    -

    Events dispatch in the Argo Server has been change from async to sync by default. This is so that errors are surfaced to -the client, rather than only appearing as logs or Kubernetes events. It is possible that response times under load are -too long for your client and you may prefer to revert this behaviour.

    -

    To revert this behaviour, restart Argo Server with ARGO_EVENT_ASYNC_DISPATCH=true. Make sure that asyncDispatch=true -is logged.

    -

    bd49c6303 fix(artifact)!: default https to any URL missing a scheme. Fixes #6973

    -

    HTTPArtifact without a scheme will now defaults to https instead of http

    -

    user need to explicitly include a http prefix if they want to retrieve HTTPArtifact through http

    -

    chore!: Remove the hidden flag --verify from argo submit

    -

    The hidden flag --verify has been removed from argo submit. This is a internal testing flag we don't need anymore.

    -

    Upgrading to v3.2

    -

    e5b131a33 feat: Add template node to pod name. Fixes #1319 (#6712)

    -

    This add the template name to the pod name, to make it easier to understand which pod ran which step. This behaviour can be reverted by setting POD_NAMES=v1 on the workflow controller.

    -

    be63efe89 feat(executor)!: Change argoexec base image to alpine. Closes #5720 (#6006)

    -

    Changing from Debian to Alpine reduces the size of the argoexec image, resulting is faster starting workflow pods, and it also reduce the risk of security issues. There is not such thing as a free lunch. There maybe other behaviour changes we don't know of yet.

    -

    Some users found this change prevented workflow with very large parameters from running. See #7586

    -

    48d7ad3 chore: Remove onExit naming transition scaffolding code (#6297)

    -

    When upgrading from <v2.12 to >v3.2 workflows that are running at the time of the upgrade and have onExit steps may experience the onExit step running twice. This is only applicable for workflows that began running before a workflow-controller upgrade and are still running after the upgrade is complete. This is only applicable for upgrading from v2.12 or earlier directly to v3.2 or later. Even under these conditions, duplicate work may not be experienced.

    -

    Upgrading to v3.1

    -

    3fff791e4 build!: Automatically add manifests to v* tags (#5880)

    -

    The manifests in the repository on the tag will no longer contain the image tag, instead they will contain :latest.

    -
      -
    • You must not get your manifests from the Git repository, you must get them from the release notes.
    • -
    • You must not use the stable tag. This is defunct, and will be removed in v3.1.
    • -
    -

    ab361667a feat(controller) Emissary executor. (#4925)

    -

    The Emissary executor is not a breaking change per-se, but it is brand new so we would not recommend you use it by default yet. Instead, we recommend you test it out on some workflows using a workflow-controller-configmap configuration.

    -
    # Specifies the executor to use.
    -#
    -# You can use this to:
    -# * Tailor your executor based on your preference for security or performance.
    -# * Test out an executor without committing yourself to use it for every workflow.
    -#
    -# To find out which executor was actually use, see the `wait` container logs.
    -#
    -# The list is in order of precedence; the first matching executor is used.
    -# This has precedence over `containerRuntimeExecutor`.
    -containerRuntimeExecutors: |
    -  - name: emissary
    -    selector:
    -      matchLabels:
    -        workflows.argoproj.io/container-runtime-executor: emissary
    -
    -

    be63efe89 feat(controller): Expression template tags. Resolves #4548 & #1293 (#5115)

    -

    This PR introduced a new expression syntax know as "expression tag template". A user has reported that this does not -always play nicely with the when condition syntax (Goevaluate).

    -

    This can be resolved using a single quote in your when expression:

    -
    when: "'{{inputs.parameters.should-print}}' != '2021-01-01'"
    -
    -

    Learn more

    -

    Upgrading to v3.0

    -

    defbd600e fix: Default ARGO_SECURE=true. Fixes #5607 (#5626)

    -

    The server now starts with TLS enabled by default if a key is available. The original behaviour can be configured with --secure=false.

    -

    If you have an ingress, you may need to add the appropriate annotations:(varies by ingress):

    -
    alb.ingress.kubernetes.io/backend-protocol: HTTPS
    -nginx.ingress.kubernetes.io/backend-protocol: HTTPS
    -
    -

    01d310235 chore(server)!: Required authentication by default. Resolves #5206 (#5211)

    -

    To login to the user interface, you must provide a login token. The original behaviour can be configured with --auth-mode=server.

    -

    f31e0c6f9 chore!: Remove deprecated fields (#5035)

    -

    Some fields that were deprecated in early 2020 have been removed.

    - - - - - - - - - - - - - - - - - -
    FieldAction
    template.template and template.templateRefThe workflow spec must be changed to use steps or DAG, otherwise the workflow will error.
    spec.ttlSecondsAfterFinishedchange to spec.ttlStrategy.secondsAfterCompletion, otherwise the workflow will not be garbage collected as expected.
    -

    To find impacted workflows:

    -
    kubectl get wf --all-namespaces -o yaml | grep templateRef
    -kubectl get wf --all-namespaces -o yaml | grep ttlSecondsAfterFinished
    -
    -

    c8215f972 feat(controller)!: Key-only artifacts. Fixes #3184 (#4618)

    -

    This change is not breaking per-se, but many users do not appear to aware of artifact repository ref, so check your usage of that feature if you have problems.

    - - - - -

    Comments

    - - +

    Upgrading Guide - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/use-cases/ci-cd/index.html b/use-cases/ci-cd/index.html index 13e537bd155f..53dfaae6d1b8 100644 --- a/use-cases/ci-cd/index.html +++ b/use-cases/ci-cd/index.html @@ -1,3985 +1,68 @@ - - - - - - - - - - - - - CI/CD - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + CI/CD - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    CI/CD

    -

    Docs

    - -

    Videos

    - - - - - -

    Comments

    - - +

    CI/CD - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/use-cases/data-processing/index.html b/use-cases/data-processing/index.html index 8f611333df7c..5da125a5d090 100644 --- a/use-cases/data-processing/index.html +++ b/use-cases/data-processing/index.html @@ -1,3998 +1,68 @@ - - - - - - - - - - - - - Data Processing - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Data Processing - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Data Processing

    -

    Docs

    - -

    Videos

    - -

    Books

    - - - - - -

    Comments

    - - +

    Data Processing - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/use-cases/infrastructure-automation/index.html b/use-cases/infrastructure-automation/index.html index 2ec6546f5ff9..fd3d1dabfd85 100644 --- a/use-cases/infrastructure-automation/index.html +++ b/use-cases/infrastructure-automation/index.html @@ -1,3984 +1,68 @@ - - - - - - - - - - - - - Infrastructure Automation - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Infrastructure Automation - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Infrastructure Automation

    -

    Docs

    - -

    Videos

    - - - - - -

    Comments

    - - +

    Infrastructure Automation - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/use-cases/machine-learning/index.html b/use-cases/machine-learning/index.html index e9dd0afef061..e18e80d7538f 100644 --- a/use-cases/machine-learning/index.html +++ b/use-cases/machine-learning/index.html @@ -1,4010 +1,68 @@ - - - - - - - - - - - - - Machine Learning - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Machine Learning - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/use-cases/other/index.html b/use-cases/other/index.html index 82fed31c5b8f..1e01934b113b 100644 --- a/use-cases/other/index.html +++ b/use-cases/other/index.html @@ -1,3964 +1,68 @@ - - - - - - - - - - - - - Other - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Other - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/use-cases/stream-processing/index.html b/use-cases/stream-processing/index.html index d51a8713887d..fffde181bd40 100644 --- a/use-cases/stream-processing/index.html +++ b/use-cases/stream-processing/index.html @@ -1,3913 +1,68 @@ - - - - - - - - - - - - - Stream Processing - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Stream Processing - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/use-cases/webhdfs/index.html b/use-cases/webhdfs/index.html index ad3d1cd81865..c5e85920d7f8 100644 --- a/use-cases/webhdfs/index.html +++ b/use-cases/webhdfs/index.html @@ -1,4026 +1,68 @@ - - - - - - - - - - - - - webHDFS via HTTP artifacts - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + webHDFS via HTTP artifacts - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    webHDFS via HTTP artifacts

    -

    webHDFS is a protocol allowing to access Hadoop or similar data storage via a unified REST API.

    -

    Input Artifacts

    -

    You can use HTTP artifacts to connect to webHDFS, where the URL will be the webHDFS endpoint including the file path and any query parameters. -Suppose your webHDFS endpoint is available under https://mywebhdfsprovider.com/webhdfs/v1/ and you have a file my-art.txt located in a data folder, which you want to use as an input artifact. To construct the URL, you append the file path to the base webHDFS endpoint and set the OPEN operation via query parameter. The result is: https://mywebhdfsprovider.com/webhdfs/v1/data/my-art.txt?op=OPEN. -See the below Workflow which will download the specified webHDFS artifact into the specified path:

    -
    spec:
    -  # ...
    -  inputs:
    -    artifacts:
    -    - name: my-art
    -    path: /my-artifact
    -    http:
    -      url: "https://mywebhdfsprovider.com/webhdfs/v1/file.txt?op=OPEN"
    -
    -

    Additional fields can be set for HTTP artifacts (for example, headers). See usage in the full webHDFS example.

    -

    Output Artifacts

    -

    To declare a webHDFS output artifact, instead use the CREATE operation and set the file path to your desired location. -In the below example, the artifact will be stored at outputs/newfile.txt. You can overwrite existing files with overwrite=true.

    -
    spec:
    -  # ...
    -  outputs:
    -    artifacts:
    -    - name: my-art
    -    path: /my-artifact
    -    http:
    -      url: "https://mywebhdfsprovider.com/webhdfs/v1/outputs/newfile.txt?op=CREATE&overwrite=true"
    -
    -

    Authentication

    -

    The above examples show minimal use cases without authentication. However, in a real-world scenario, you may want to use authentication. -The authentication mechanism is limited to those supported by HTTP artifacts:

    -
      -
    • HTTP Basic Auth
    • -
    • OAuth2
    • -
    • Client Certificates
    • -
    -

    Examples for the latter two mechanisms can be found in the full webHDFS example.

    -
    -

    Provider dependent

    -

    While your webHDFS provider may support the above mechanisms, Hadoop itself only supports authentication via Kerberos SPNEGO and Hadoop delegation token. HTTP artifacts do not currently support SPNEGO, but delegation tokens can be used via the delegation query parameter.

    -
    - - - - -

    Comments

    - - +

    webHDFS via HTTP artifacts - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/variables/index.html b/variables/index.html index 4ab7358ceede..25fe622b4408 100644 --- a/variables/index.html +++ b/variables/index.html @@ -1,4794 +1,68 @@ - - - - - - - - - - - - - Workflow Variables - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Workflow Variables - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Workflow Variables

    -

    Some fields in a workflow specification allow for variable references which are automatically substituted by Argo.

    -

    How to use variables

    -

    Variables are enclosed in curly braces:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: hello-world-parameters-
    -spec:
    -  entrypoint: whalesay
    -  arguments:
    -    parameters:
    -      - name: message
    -        value: hello world
    -  templates:
    -    - name: whalesay
    -      inputs:
    -        parameters:
    -          - name: message
    -      container:
    -        image: docker/whalesay
    -        command: [ cowsay ]
    -        args: [ "{{inputs.parameters.message}}" ]
    -
    -

    The following variables are made available to reference various meta-data of a workflow:

    -

    Template Tag Kinds

    -

    There are two kinds of template tag:

    -
      -
    • simple The default, e.g. {{workflow.name}}
    • -
    • expression Where{{ is immediately followed by =, e.g. {{=workflow.name}}.
    • -
    -

    Simple

    -

    The tag is substituted with the variable that has a name the same as the tag.

    -

    Simple tags may have white-space between the brackets and variable as seen below. However, there is a known issue where variables may fail to interpolate with white-space, so it is recommended to avoid using white-space until this issue is resolved. Please report unexpected behavior with reproducible examples.

    -
    args: [ "{{ inputs.parameters.message }}" ]
    -
    -

    Expression

    -
    -

    Since v3.1

    -
    -

    The tag is substituted with the result of evaluating the tag as an expression.

    -

    Note that any hyphenated parameter names or step names will cause a parsing error. You can reference them by -indexing into the parameter or step map, e.g. inputs.parameters['my-param'] or steps['my-step'].outputs.result.

    -

    Learn about the expression syntax.

    -

    Examples

    -

    Plain list:

    -
    [1, 2]
    -
    -

    Filter a list:

    -
    filter([1, 2], { # > 1})
    -
    -

    Map a list:

    -
    map([1, 2], { # * 2 })
    -
    -

    We provide some core functions:

    -

    Cast to int:

    -
    asInt(inputs.parameters['my-int-param'])
    -
    -

    Cast to float:

    -
    asFloat(inputs.parameters['my-float-param'])
    -
    -

    Cast to string:

    -
    string(1)
    -
    -

    Convert to a JSON string (needed for withParam):

    -
    toJson([1, 2])
    -
    -

    Extract data from JSON:

    -
    jsonpath(inputs.parameters.json, '$.some.path')
    -
    -

    You can also use Sprig functions:

    -

    Trim a string:

    -
    sprig.trim(inputs.parameters['my-string-param'])
    -
    -
    -

    Sprig error handling

    -

    Sprig functions often do not raise errors. -For example, if int is used on an invalid value, it returns 0. -Please review the Sprig documentation to understand which functions raise errors and which do not.

    -
    -

    Reference

    -

    All Templates

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    VariableDescription
    inputs.parameters.<NAME>Input parameter to a template
    inputs.parametersAll input parameters to a template as a JSON string
    inputs.artifacts.<NAME>Input artifact to a template
    node.nameFull name of the node
    -

    Steps Templates

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    VariableDescription
    steps.nameName of the step
    steps.<STEPNAME>.idunique id of container step
    steps.<STEPNAME>.ipIP address of a previous daemon container step
    steps.<STEPNAME>.statusPhase status of any previous step
    steps.<STEPNAME>.exitCodeExit code of any previous script or container step
    steps.<STEPNAME>.startedAtTime-stamp when the step started
    steps.<STEPNAME>.finishedAtTime-stamp when the step finished
    steps.<TASKNAME>.hostNodeNameHost node where task ran (available from version 3.5)
    steps.<STEPNAME>.outputs.resultOutput result of any previous container or script step
    steps.<STEPNAME>.outputs.parametersWhen the previous step uses withItems or withParams, this contains a JSON array of the output parameter maps of each invocation
    steps.<STEPNAME>.outputs.parameters.<NAME>Output parameter of any previous step. When the previous step uses withItems or withParams, this contains a JSON array of the output parameter values of each invocation
    steps.<STEPNAME>.outputs.artifacts.<NAME>Output artifact of any previous step
    -

    DAG Templates

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    VariableDescription
    tasks.nameName of the task
    tasks.<TASKNAME>.idunique id of container task
    tasks.<TASKNAME>.ipIP address of a previous daemon container task
    tasks.<TASKNAME>.statusPhase status of any previous task
    tasks.<TASKNAME>.exitCodeExit code of any previous script or container task
    tasks.<TASKNAME>.startedAtTime-stamp when the task started
    tasks.<TASKNAME>.finishedAtTime-stamp when the task finished
    tasks.<TASKNAME>.hostNodeNameHost node where task ran (available from version 3.5)
    tasks.<TASKNAME>.outputs.resultOutput result of any previous container or script task
    tasks.<TASKNAME>.outputs.parametersWhen the previous task uses withItems or withParams, this contains a JSON array of the output parameter maps of each invocation
    tasks.<TASKNAME>.outputs.parameters.<NAME>Output parameter of any previous task. When the previous task uses withItems or withParams, this contains a JSON array of the output parameter values of each invocation
    tasks.<TASKNAME>.outputs.artifacts.<NAME>Output artifact of any previous task
    -

    HTTP Templates

    -
    -

    Since v3.3

    -
    -

    Only available for successCondition

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    VariableDescription
    request.methodRequest method (string)
    request.urlRequest URL (string)
    request.bodyRequest body (string)
    request.headersRequest headers (map[string][]string)
    response.statusCodeResponse status code (int)
    response.bodyResponse body (string)
    response.headersResponse headers (map[string][]string)
    -

    RetryStrategy

    -

    When using the expression field within retryStrategy, special variables are available.

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    VariableDescription
    lastRetry.exitCodeExit code of the last retry
    lastRetry.statusStatus of the last retry
    lastRetry.durationDuration in seconds of the last retry
    lastRetry.messageMessage output from the last retry (available from version 3.5)
    -

    Note: These variables evaluate to a string type. If using advanced expressions, either cast them to int values (expression: "{{=asInt(lastRetry.exitCode) >= 2}}") or compare them to string values (expression: "{{=lastRetry.exitCode != '2'}}").

    -

    Container/Script Templates

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    VariableDescription
    pod.namePod name of the container/script
    retriesThe retry number of the container/script if retryStrategy is specified
    inputs.artifacts.<NAME>.pathLocal path of the input artifact
    outputs.artifacts.<NAME>.pathLocal path of the output artifact
    outputs.parameters.<NAME>.pathLocal path of the output parameter
    -

    Loops (withItems / withParam)

    - - - - - - - - - - - - - - - - - -
    VariableDescription
    itemValue of the item in a list
    item.<FIELDNAME>Field value of the item in a list of maps
    -

    Metrics

    -

    When emitting custom metrics in a template, special variables are available that allow self-reference to the current -step.

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    VariableDescription
    statusPhase status of the metric-emitting template
    durationDuration of the metric-emitting template in seconds (only applicable in Template-level metrics, for Workflow-level use workflow.duration)
    exitCodeExit code of the metric-emitting template
    inputs.parameters.<NAME>Input parameter of the metric-emitting template
    outputs.parameters.<NAME>Output parameter of the metric-emitting template
    outputs.resultOutput result of the metric-emitting template
    resourcesDuration.{cpu,memory}Resources duration in seconds. Must be one of resourcesDuration.cpu or resourcesDuration.memory, if available. For more info, see the Resource Duration doc.
    retriesRetried count by retry strategy
    -

    Real-Time Metrics

    -

    Some variables can be emitted in real-time (as opposed to just when the step/task completes). To emit these variables in -real time, set realtime: true under gauge (note: only Gauge metrics allow for real time variable emission). Metrics -currently available for real time emission:

    -

    For Workflow-level metrics:

    -
      -
    • workflow.duration
    • -
    -

    For Template-level metrics:

    -
      -
    • duration
    • -
    -

    Global

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    VariableDescription
    workflow.nameWorkflow name
    workflow.namespaceWorkflow namespace
    workflow.mainEntrypointWorkflow's initial entrypoint
    workflow.serviceAccountNameWorkflow service account name
    workflow.uidWorkflow UID. Useful for setting ownership reference to a resource, or a unique artifact location
    workflow.parameters.<NAME>Input parameter to the workflow
    workflow.parametersAll input parameters to the workflow as a JSON string (this is deprecated in favor of workflow.parameters.json as this doesn't work with expression tags and that does)
    workflow.parameters.jsonAll input parameters to the workflow as a JSON string
    workflow.outputs.parameters.<NAME>Global parameter in the workflow
    workflow.outputs.artifacts.<NAME>Global artifact in the workflow
    workflow.annotations.<NAME>Workflow annotations
    workflow.annotations.jsonall Workflow annotations as a JSON string
    workflow.labels.<NAME>Workflow labels
    workflow.labels.jsonall Workflow labels as a JSON string
    workflow.creationTimestampWorkflow creation time-stamp formatted in RFC 3339 (e.g. 2018-08-23T05:42:49Z)
    workflow.creationTimestamp.<STRFTIMECHAR>Creation time-stamp formatted with a strftime format character.
    workflow.creationTimestamp.RFC3339Creation time-stamp formatted with in RFC 3339.
    workflow.priorityWorkflow priority
    workflow.durationWorkflow duration estimate in seconds, may differ from actual duration by a couple of seconds
    workflow.scheduledTimeScheduled runtime formatted in RFC 3339 (only available for CronWorkflow)
    -

    Exit Handler

    - - - - - - - - - - - - - - - - - -
    VariableDescription
    workflow.statusWorkflow status. One of: Succeeded, Failed, Error
    workflow.failuresA list of JSON objects containing information about nodes that failed or errored during execution. Available fields: displayName, message, templateName, phase, podName, and finishedAt.
    -

    Knowing where you are

    -

    The idea with creating a WorkflowTemplate is that they are reusable bits of code you will use in many actual Workflows. Sometimes it is useful to know which workflow you are part of.

    -

    workflow.mainEntrypoint is one way you can do this. If each of your actual workflows has a differing entrypoint, you can identify the workflow you're part of. Given this use in a WorkflowTemplate:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: WorkflowTemplate
    -metadata:
    -  name: say-main-entrypoint
    -spec:
    -  entrypoint: echo
    -  templates:
    -  - name: echo
    -    container:
    -      image: alpine
    -      command: [echo]
    -      args: ["{{workflow.mainEntrypoint}}"]
    -
    -

    I can distinguish my caller:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: foo-
    -spec:
    -  entrypoint: foo
    -  templates:
    -    - name: foo
    -      steps:
    -      - - name: step
    -          templateRef:
    -            name: say-main-entrypoint
    -            template: echo
    -
    -

    results in a log of foo

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: bar-
    -spec:
    -  entrypoint: bar
    -  templates:
    -    - name: bar
    -      steps:
    -      - - name: step
    -          templateRef:
    -            name: say-main-entrypoint
    -            template: echo
    -
    -

    results in a log of bar

    -

    This shouldn't be that helpful in logging, you should be able to identify workflows through other labels in your cluster's log tool, but can be helpful when generating metrics for the workflow for example.

    - - - - -

    Comments

    - - +

    Workflow Variables - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/argo-cli/index.html b/walk-through/argo-cli/index.html index 2150773311ec..1ec8c08b57e9 100644 --- a/walk-through/argo-cli/index.html +++ b/walk-through/argo-cli/index.html @@ -1,3984 +1,68 @@ - - - - - - - - - - - - - Argo CLI - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Argo CLI - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Argo CLI

    -

    Installation

    -

    To install the Argo CLI, follow the instructions on the GitHub Releases page.

    -

    Usage

    -

    In case you want to follow along with this walk-through, here's a quick overview of the most useful argo command line interface (CLI) commands.

    -
    argo submit hello-world.yaml    # submit a workflow spec to Kubernetes
    -argo list                       # list current workflows
    -argo get hello-world-xxx        # get info about a specific workflow
    -argo logs hello-world-xxx       # print the logs from a workflow
    -argo delete hello-world-xxx     # delete workflow
    -
    -

    You can also run workflow specs directly using kubectl, but the Argo CLI provides syntax checking, nicer output, and requires less typing.

    -

    See the CLI Reference for more details.

    - - - - -

    Comments

    - - +

    Argo CLI - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/artifacts/index.html b/walk-through/artifacts/index.html index 4811fba8668d..38b159645b2b 100644 --- a/walk-through/artifacts/index.html +++ b/walk-through/artifacts/index.html @@ -1,4259 +1,68 @@ - - - - - - - - - - - - - Artifacts - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Artifacts - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Artifacts

    -
    -

    Note

    -

    You will need to configure an artifact repository to run this example.

    -
    -

    When running workflows, it is very common to have steps that generate or consume artifacts. Often, the output artifacts of one step may be used as input artifacts to a subsequent step.

    -

    The below workflow spec consists of two steps that run in sequence. The first step named generate-artifact will generate an artifact using the whalesay template that will be consumed by the second step named print-message that then consumes the generated artifact.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: artifact-passing-
    -spec:
    -  entrypoint: artifact-example
    -  templates:
    -  - name: artifact-example
    -    steps:
    -    - - name: generate-artifact
    -        template: whalesay
    -    - - name: consume-artifact
    -        template: print-message
    -        arguments:
    -          artifacts:
    -          # bind message to the hello-art artifact
    -          # generated by the generate-artifact step
    -          - name: message
    -            from: "{{steps.generate-artifact.outputs.artifacts.hello-art}}"
    -
    -  - name: whalesay
    -    container:
    -      image: docker/whalesay:latest
    -      command: [sh, -c]
    -      args: ["cowsay hello world | tee /tmp/hello_world.txt"]
    -    outputs:
    -      artifacts:
    -      # generate hello-art artifact from /tmp/hello_world.txt
    -      # artifacts can be directories as well as files
    -      - name: hello-art
    -        path: /tmp/hello_world.txt
    -
    -  - name: print-message
    -    inputs:
    -      artifacts:
    -      # unpack the message input artifact
    -      # and put it at /tmp/message
    -      - name: message
    -        path: /tmp/message
    -    container:
    -      image: alpine:latest
    -      command: [sh, -c]
    -      args: ["cat /tmp/message"]
    -
    -

    The whalesay template uses the cowsay command to generate a file named /tmp/hello-world.txt. It then outputs this file as an artifact named hello-art. In general, the artifact's path may be a directory rather than just a file. The print-message template takes an input artifact named message, unpacks it at the path named /tmp/message and then prints the contents of /tmp/message using the cat command. -The artifact-example template passes the hello-art artifact generated as an output of the generate-artifact step as the message input artifact to the print-message step. DAG templates use the tasks prefix to refer to another task, for example {{tasks.generate-artifact.outputs.artifacts.hello-art}}.

    -

    Optionally, for large artifacts, you can set podSpecPatch in the workflow spec to increase the resource request for the init container and avoid any Out of memory issues.

    -
    <... snipped ...>
    -  - name: large-artifact
    -    # below patch gets merged with the actual pod spec and increses the memory
    -    # request of the init container.
    -    podSpecPatch: |
    -      initContainers:
    -        - name: init
    -          resources:
    -            requests:
    -              memory: 2Gi
    -              cpu: 300m
    -    inputs:
    -      artifacts:
    -      - name: data
    -        path: /tmp/large-file
    -    container:
    -      image: alpine:latest
    -      command: [sh, -c]
    -      args: ["cat /tmp/large-file"]
    -<... snipped ...>
    -
    -

    Artifacts are packaged as Tarballs and gzipped by default. You may customize this behavior by specifying an archive strategy, using the archive field. For example:

    -
    <... snipped ...>
    -    outputs:
    -      artifacts:
    -        # default behavior - tar+gzip default compression.
    -      - name: hello-art-1
    -        path: /tmp/hello_world.txt
    -
    -        # disable archiving entirely - upload the file / directory as is.
    -        # this is useful when the container layout matches the desired target repository layout.   
    -      - name: hello-art-2
    -        path: /tmp/hello_world.txt
    -        archive:
    -          none: {}
    -
    -        # customize the compression behavior (disabling it here).
    -        # this is useful for files with varying compression benefits, 
    -        # e.g. disabling compression for a cached build workspace and large binaries, 
    -        # or increasing compression for "perfect" textual data - like a json/xml export of a large database.
    -      - name: hello-art-3
    -        path: /tmp/hello_world.txt
    -        archive:
    -          tar:
    -            # no compression (also accepts the standard gzip 1 to 9 values)
    -            compressionLevel: 0
    -<... snipped ...>
    -
    -

    Artifact Garbage Collection

    -

    As of version 3.4 you can configure your Workflow to automatically delete Artifacts that you don't need (visit artifact repository capability for the current supported store engine).

    -

    Artifacts can be deleted OnWorkflowCompletion or OnWorkflowDeletion. You can specify your Garbage Collection strategy on both the Workflow level and the Artifact level, so for example, you may have temporary artifacts that can be deleted right away but a final output that should be persisted:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: artifact-gc-
    -spec:
    -  entrypoint: main
    -  artifactGC:
    -    strategy: OnWorkflowDeletion  # default Strategy set here applies to all Artifacts by default
    -  templates:
    -    - name: main
    -      container:
    -        image: argoproj/argosay:v2
    -        command:
    -          - sh
    -          - -c
    -        args:
    -          - |
    -            echo "can throw this away" > /tmp/temporary-artifact.txt
    -            echo "keep this" > /tmp/keep-this.txt
    -      outputs:
    -        artifacts:
    -          - name: temporary-artifact
    -            path: /tmp/temporary-artifact.txt
    -            s3:
    -              key: temporary-artifact.txt
    -          - name: keep-this
    -            path: /tmp/keep-this.txt
    -            s3:
    -              key: keep-this.txt
    -            artifactGC:
    -              strategy: Never   # optional override for an Artifact
    -
    -

    Artifact Naming

    -

    Consider parameterizing your S3 keys by {{workflow.uid}}, etc (as shown in the example above) if there's a possibility that you could have concurrent Workflows of the same spec. This would be to avoid a scenario in which the artifact from one Workflow is being deleted while the same S3 key is being generated for a different Workflow.

    -

    Service Accounts and Annotations

    -

    Does your S3 bucket require you to run with a special Service Account or IAM Role Annotation? You can either use the same ones you use for creating artifacts or generate new ones that are specific for deletion permission. Generally users will probably just have a single Service Account or IAM Role to apply to all artifacts for the Workflow, but you can also customize on the artifact level if you need that:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: artifact-gc-
    -spec:
    -  entrypoint: main
    -  artifactGC:
    -    strategy: OnWorkflowDeletion 
    -    ##############################################################################################
    -    #    Workflow Level Service Account and Metadata
    -    ##############################################################################################
    -    serviceAccountName: my-sa
    -    podMetadata:
    -      annotations:
    -        eks.amazonaws.com/role-arn: arn:aws:iam::111122223333:role/my-iam-role
    -  templates:
    -    - name: main
    -      container:
    -        image: argoproj/argosay:v2
    -        command:
    -          - sh
    -          - -c
    -        args:
    -          - |
    -            echo "can throw this away" > /tmp/temporary-artifact.txt
    -            echo "keep this" > /tmp/keep-this.txt
    -      outputs:
    -        artifacts:
    -          - name: temporary-artifact
    -            path: /tmp/temporary-artifact.txt
    -            s3:
    -              key: temporary-artifact-{{workflow.uid}}.txt
    -            artifactGC:
    -              ####################################################################################
    -              #    Optional override capability
    -              ####################################################################################
    -              serviceAccountName: artifact-specific-sa
    -              podMetadata:
    -                annotations:
    -                  eks.amazonaws.com/role-arn: arn:aws:iam::111122223333:role/artifact-specific-iam-role
    -          - name: keep-this
    -            path: /tmp/keep-this.txt
    -            s3:
    -              key: keep-this-{{workflow.uid}}.txt
    -            artifactGC:
    -              strategy: Never
    -
    -

    If you do supply your own Service Account you will need to create a RoleBinding that binds it with a role like this:

    -
    apiVersion: rbac.authorization.k8s.io/v1
    -kind: Role
    -metadata:
    -  annotations:
    -    workflows.argoproj.io/description: |
    -      This is the minimum recommended permissions needed if you want to use artifact GC.
    -  name: artifactgc
    -rules:
    -- apiGroups:
    -  - argoproj.io
    -  resources:
    -  - workflowartifactgctasks
    -  verbs:
    -  - list
    -  - watch
    -- apiGroups:
    -  - argoproj.io
    -  resources:
    -  - workflowartifactgctasks/status
    -  verbs:
    -  - patch
    -
    -

    This is the artifactgc role if you installed using one of the quick-start manifest files. If you installed with the install.yaml file for the release then the same permissions are in the argo-cluster-role.

    -

    If you don't use your own ServiceAccount and are just using default ServiceAccount, then the role needs a RoleBinding or ClusterRoleBinding to default ServiceAccount.

    -

    What happens if Garbage Collection fails?

    -

    If deletion of the artifact fails for some reason (other than the Artifact already having been deleted which is not considered a failure), the Workflow's Status will be marked with a new Condition to indicate "Artifact GC Failure", a Kubernetes Event will be issued, and the Argo Server UI will also indicate the failure. For additional debugging, the user should find 1 or more Pods named <wfName>-artgc-* and can view the logs.

    -

    If the user needs to delete the Workflow and its child CRD objects, they will need to patch the Workflow to remove the finalizer preventing the deletion:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -  finalizers:
    -  - workflows.argoproj.io/artifact-gc
    -
    -

    The finalizer can be deleted by doing:

    -
    kubectl patch workflow my-wf \
    -    --type json \
    -    --patch='[ { "op": "remove", "path": "/metadata/finalizers" } ]'
    -
    -

    Or for simplicity use the Argo CLI argo delete command with flag --force, which under the hood removes the finalizer before performing the deletion.

    -

    Release Versions >= 3.5

    -

    A flag has been added to the Workflow Spec called forceFinalizerRemoval (see here) to force the finalizer's removal even if Artifact GC fails:

    -
    spec:
    -  artifactGC:
    -    strategy: OnWorkflowDeletion 
    -    forceFinalizerRemoval: true
    -
    - - - - -

    Comments

    - - +

    Artifacts - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/conditionals/index.html b/walk-through/conditionals/index.html index 43335a264910..3f0cc40eaace 100644 --- a/walk-through/conditionals/index.html +++ b/walk-through/conditionals/index.html @@ -1,3987 +1,68 @@ - - - - - - - - - - - - - Conditionals - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Conditionals - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Conditionals

    -

    We also support conditional execution. The syntax is implemented by govaluate which offers the support for complex syntax. See in the example:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: coinflip-
    -spec:
    -  entrypoint: coinflip
    -  templates:
    -  - name: coinflip
    -    steps:
    -    # flip a coin
    -    - - name: flip-coin
    -        template: flip-coin
    -    # evaluate the result in parallel
    -    - - name: heads
    -        template: heads                       # call heads template if "heads"
    -        when: "{{steps.flip-coin.outputs.result}} == heads"
    -      - name: tails
    -        template: tails                       # call tails template if "tails"
    -        when: "{{steps.flip-coin.outputs.result}} == tails"
    -    - - name: flip-again
    -        template: flip-coin
    -    - - name: complex-condition
    -        template: heads-tails-or-twice-tails
    -        # call heads template if first flip was "heads" and second was "tails" OR both were "tails"
    -        when: >-
    -            ( {{steps.flip-coin.outputs.result}} == heads &&
    -              {{steps.flip-again.outputs.result}} == tails
    -            ) ||
    -            ( {{steps.flip-coin.outputs.result}} == tails &&
    -              {{steps.flip-again.outputs.result}} == tails )
    -      - name: heads-regex
    -        template: heads                       # call heads template if ~ "hea"
    -        when: "{{steps.flip-again.outputs.result}} =~ hea"
    -      - name: tails-regex
    -        template: tails                       # call heads template if ~ "tai"
    -        when: "{{steps.flip-again.outputs.result}} =~ tai"
    -
    -  # Return heads or tails based on a random number
    -  - name: flip-coin
    -    script:
    -      image: python:alpine3.6
    -      command: [python]
    -      source: |
    -        import random
    -        result = "heads" if random.randint(0,1) == 0 else "tails"
    -        print(result)
    -
    -  - name: heads
    -    container:
    -      image: alpine:3.6
    -      command: [sh, -c]
    -      args: ["echo \"it was heads\""]
    -
    -  - name: tails
    -    container:
    -      image: alpine:3.6
    -      command: [sh, -c]
    -      args: ["echo \"it was tails\""]
    -
    -  - name: heads-tails-or-twice-tails
    -    container:
    -      image: alpine:3.6
    -      command: [sh, -c]
    -      args: ["echo \"it was heads the first flip and tails the second. Or it was two times tails.\""]
    -
    -
    -

    Nested Quotes

    -

    If the parameter value contains quotes, it may invalidate the govaluate expression. -To handle parameters with quotes, embed an expr expression in the conditional. -For example:

    -
    - -
    when: "{{=inputs.parameters['may-contain-quotes'] == 'example'}}"
    -
    - - - - -

    Comments

    - - +

    Conditionals - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/continuous-integration-examples/index.html b/walk-through/continuous-integration-examples/index.html index fc0f14e1f16f..cda4827ae607 100644 --- a/walk-through/continuous-integration-examples/index.html +++ b/walk-through/continuous-integration-examples/index.html @@ -1,3924 +1,68 @@ - - - - - - - - - - - - - Continuous Integration Examples - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Continuous Integration Examples - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Continuous Integration Examples

    -

    Continuous integration is a popular application for workflows.

    -

    Some quick examples of CI workflows:

    - -

    And a CI WorkflowTemplate example:

    - -

    A more detailed example is https://github.com/sendible-labs/argo-workflows-ci-example, which allows you to -create a local CI workflow for the purposes of learning.

    - - - - -

    Comments

    - - +

    Continuous Integration Examples - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/custom-template-variable-reference/index.html b/walk-through/custom-template-variable-reference/index.html index 4705a4cea20f..521c3348f9c7 100644 --- a/walk-through/custom-template-variable-reference/index.html +++ b/walk-through/custom-template-variable-reference/index.html @@ -1,3947 +1,68 @@ - - - - - - - - - - - - - Custom Template Variable Reference - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Custom Template Variable Reference - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Custom Template Variable Reference

    -

    In this example, we can see how we can use the other template language variable reference (E.g: Jinja) in Argo workflow template. -Argo will validate and resolve only the variable that starts with an Argo allowed prefix -{"item", "steps", "inputs", "outputs", "workflow", "tasks"}

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: custom-template-variable-
    -spec:
    -  entrypoint: hello-hello-hello
    -
    -  templates:
    -    - name: hello-hello-hello
    -      steps:
    -        - - name: hello1
    -            template: whalesay
    -            arguments:
    -              parameters: [{name: message, value: "hello1"}]
    -        - - name: hello2a
    -            template: whalesay
    -            arguments:
    -              parameters: [{name: message, value: "hello2a"}]
    -          - name: hello2b
    -            template: whalesay
    -            arguments:
    -              parameters: [{name: message, value: "hello2b"}]
    -
    -    - name: whalesay
    -      inputs:
    -        parameters:
    -          - name: message
    -      container:
    -        image: docker/whalesay
    -        command: [cowsay]
    -        args: ["{{user.username}}"]
    -
    - - - - -

    Comments

    - - +

    Custom Template Variable Reference - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/daemon-containers/index.html b/walk-through/daemon-containers/index.html index 57f63f58c103..c9e297065420 100644 --- a/walk-through/daemon-containers/index.html +++ b/walk-through/daemon-containers/index.html @@ -1,3985 +1,68 @@ - - - - - - - - - - - - - Daemon Containers - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Daemon Containers - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Daemon Containers

    -

    Argo workflows can start containers that run in the background (also known as daemon containers) while the workflow itself continues execution. Note that the daemons will be automatically destroyed when the workflow exits the template scope in which the daemon was invoked. Daemon containers are useful for starting up services to be tested or to be used in testing (e.g., fixtures). We also find it very useful when running large simulations to spin up a database as a daemon for collecting and organizing the results. The big advantage of daemons compared with sidecars is that their existence can persist across multiple steps or even the entire workflow.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: daemon-step-
    -spec:
    -  entrypoint: daemon-example
    -  templates:
    -  - name: daemon-example
    -    steps:
    -    - - name: influx
    -        template: influxdb              # start an influxdb as a daemon (see the influxdb template spec below)
    -
    -    - - name: init-database             # initialize influxdb
    -        template: influxdb-client
    -        arguments:
    -          parameters:
    -          - name: cmd
    -            value: curl -XPOST 'http://{{steps.influx.ip}}:8086/query' --data-urlencode "q=CREATE DATABASE mydb"
    -
    -    - - name: producer-1                # add entries to influxdb
    -        template: influxdb-client
    -        arguments:
    -          parameters:
    -          - name: cmd
    -            value: for i in $(seq 1 20); do curl -XPOST 'http://{{steps.influx.ip}}:8086/write?db=mydb' -d "cpu,host=server01,region=uswest load=$i" ; sleep .5 ; done
    -      - name: producer-2                # add entries to influxdb
    -        template: influxdb-client
    -        arguments:
    -          parameters:
    -          - name: cmd
    -            value: for i in $(seq 1 20); do curl -XPOST 'http://{{steps.influx.ip}}:8086/write?db=mydb' -d "cpu,host=server02,region=uswest load=$((RANDOM % 100))" ; sleep .5 ; done
    -      - name: producer-3                # add entries to influxdb
    -        template: influxdb-client
    -        arguments:
    -          parameters:
    -          - name: cmd
    -            value: curl -XPOST 'http://{{steps.influx.ip}}:8086/write?db=mydb' -d 'cpu,host=server03,region=useast load=15.4'
    -
    -    - - name: consumer                  # consume intries from influxdb
    -        template: influxdb-client
    -        arguments:
    -          parameters:
    -          - name: cmd
    -            value: curl --silent -G http://{{steps.influx.ip}}:8086/query?pretty=true --data-urlencode "db=mydb" --data-urlencode "q=SELECT * FROM cpu"
    -
    -  - name: influxdb
    -    daemon: true                        # start influxdb as a daemon
    -    retryStrategy:
    -      limit: 10                         # retry container if it fails
    -    container:
    -      image: influxdb:1.2
    -      command:
    -      - influxd
    -      readinessProbe:                   # wait for readinessProbe to succeed
    -        httpGet:
    -          path: /ping
    -          port: 8086
    -
    -  - name: influxdb-client
    -    inputs:
    -      parameters:
    -      - name: cmd
    -    container:
    -      image: appropriate/curl:latest
    -      command: ["/bin/sh", "-c"]
    -      args: ["{{inputs.parameters.cmd}}"]
    -      resources:
    -        requests:
    -          memory: 32Mi
    -          cpu: 100m
    -
    -

    Step templates use the steps prefix to refer to another step: for example {{steps.influx.ip}}. In DAG templates, the tasks prefix is used instead: for example {{tasks.influx.ip}}.

    - - - - -

    Comments

    - - +

    Daemon Containers - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/dag/index.html b/walk-through/dag/index.html index 3df6d369a8f1..1f4265a3771a 100644 --- a/walk-through/dag/index.html +++ b/walk-through/dag/index.html @@ -1,4022 +1,68 @@ - - - - - - - - - - - - - DAG - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + DAG - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    DAG

    -

    As an alternative to specifying sequences of steps, you can define a workflow as a directed-acyclic graph (DAG) by specifying the dependencies of each task. -DAGs can be simpler to maintain for complex workflows and allow for maximum parallelism when running tasks.

    -

    In the following workflow, step A runs first, as it has no dependencies. -Once A has finished, steps B and C run in parallel. -Finally, once B and C have completed, step D runs.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: dag-diamond-
    -spec:
    -  entrypoint: diamond
    -  templates:
    -  - name: echo
    -    inputs:
    -      parameters:
    -      - name: message
    -    container:
    -      image: alpine:3.7
    -      command: [echo, "{{inputs.parameters.message}}"]
    -  - name: diamond
    -    dag:
    -      tasks:
    -      - name: A
    -        template: echo
    -        arguments:
    -          parameters: [{name: message, value: A}]
    -      - name: B
    -        dependencies: [A]
    -        template: echo
    -        arguments:
    -          parameters: [{name: message, value: B}]
    -      - name: C
    -        dependencies: [A]
    -        template: echo
    -        arguments:
    -          parameters: [{name: message, value: C}]
    -      - name: D
    -        dependencies: [B, C]
    -        template: echo
    -        arguments:
    -          parameters: [{name: message, value: D}]
    -
    -

    The dependency graph may have multiple roots. -The templates called from a DAG or steps template can themselves be DAG or steps templates, allowing complex workflows to be split into manageable pieces.

    -

    Enhanced Depends

    -

    For more complicated, conditional dependencies, you can use the Enhanced Depends feature.

    -

    Fail Fast

    -

    By default, DAGs fail fast: when one task fails, no new tasks will be scheduled. -Once all running tasks are completed, the DAG will be marked as failed.

    -

    If failFast is set to false for a DAG, all branches will run to completion, regardless of failures in other branches.

    - - - - -

    Comments

    - - +

    DAG - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/docker-in-docker-using-sidecars/index.html b/walk-through/docker-in-docker-using-sidecars/index.html index 7b1d92e56fbf..270c22164b0e 100644 --- a/walk-through/docker-in-docker-using-sidecars/index.html +++ b/walk-through/docker-in-docker-using-sidecars/index.html @@ -1,3945 +1,68 @@ - - - - - - - - - - - - - Docker-in-Docker Using Sidecars - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Docker-in-Docker Using Sidecars - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Docker-in-Docker Using Sidecars

    -

    Note: It is increasingly unlikely that the below example will work for you on your version of Kubernetes. Since Kubernetes 1.24, the dockershim has been unavailable as part of Kubernetes, rendering Docker-in-Docker unworkable. It is recommended to seek alternative methods of building containers, such as Kaniko or Buildkit. A Buildkit Workflow example is available in the examples directory of the Argo Workflows repository.

    -
    -

    An application of sidecars is to implement Docker-in-Docker (DIND). DIND is useful when you want to run Docker commands from inside a container. For example, you may want to build and push a container image from inside your build container. In the following example, we use the docker:dind image to run a Docker daemon in a sidecar and give the main container access to the daemon.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: sidecar-dind-
    -spec:
    -  entrypoint: dind-sidecar-example
    -  templates:
    -  - name: dind-sidecar-example
    -    container:
    -      image: docker:19.03.13
    -      command: [sh, -c]
    -      args: ["until docker ps; do sleep 3; done; docker run --rm debian:latest cat /etc/os-release"]
    -      env:
    -      - name: DOCKER_HOST               # the docker daemon can be access on the standard port on localhost
    -        value: 127.0.0.1
    -    sidecars:
    -    - name: dind
    -      image: docker:19.03.13-dind          # Docker already provides an image for running a Docker daemon
    -      command: [dockerd-entrypoint.sh]
    -      env:
    -        - name: DOCKER_TLS_CERTDIR         # Docker TLS env config
    -          value: ""
    -      securityContext:
    -        privileged: true                # the Docker daemon can only run in a privileged container
    -      # mirrorVolumeMounts will mount the same volumes specified in the main container
    -      # to the sidecar (including artifacts), at the same mountPaths. This enables
    -      # dind daemon to (partially) see the same filesystem as the main container in
    -      # order to use features such as docker volume binding.
    -      mirrorVolumeMounts: true
    -
    - - - - -

    Comments

    - - +

    Docker-in-Docker Using Sidecars - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/exit-handlers/index.html b/walk-through/exit-handlers/index.html index 8ee0421e7a92..d952750a161f 100644 --- a/walk-through/exit-handlers/index.html +++ b/walk-through/exit-handlers/index.html @@ -1,3965 +1,68 @@ - - - - - - - - - - - - - Exit handlers - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Exit handlers - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Exit handlers

    -

    An exit handler is a template that always executes, irrespective of success or failure, at the end of the workflow.

    -

    Some common use cases of exit handlers are:

    -
      -
    • cleaning up after a workflow runs
    • -
    • sending notifications of workflow status (e.g., e-mail/Slack)
    • -
    • posting the pass/fail status to a web-hook result (e.g. GitHub build result)
    • -
    • resubmitting or submitting another workflow
    • -
    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: exit-handlers-
    -spec:
    -  entrypoint: intentional-fail
    -  onExit: exit-handler                  # invoke exit-handler template at end of the workflow
    -  templates:
    -  # primary workflow template
    -  - name: intentional-fail
    -    container:
    -      image: alpine:latest
    -      command: [sh, -c]
    -      args: ["echo intentional failure; exit 1"]
    -
    -  # Exit handler templates
    -  # After the completion of the entrypoint template, the status of the
    -  # workflow is made available in the global variable {{workflow.status}}.
    -  # {{workflow.status}} will be one of: Succeeded, Failed, Error
    -  - name: exit-handler
    -    steps:
    -    - - name: notify
    -        template: send-email
    -      - name: celebrate
    -        template: celebrate
    -        when: "{{workflow.status}} == Succeeded"
    -      - name: cry
    -        template: cry
    -        when: "{{workflow.status}} != Succeeded"
    -  - name: send-email
    -    container:
    -      image: alpine:latest
    -      command: [sh, -c]
    -      args: ["echo send e-mail: {{workflow.name}} {{workflow.status}} {{workflow.duration}}"]
    -  - name: celebrate
    -    container:
    -      image: alpine:latest
    -      command: [sh, -c]
    -      args: ["echo hooray!"]
    -  - name: cry
    -    container:
    -      image: alpine:latest
    -      command: [sh, -c]
    -      args: ["echo boohoo!"]
    -
    - - - - -

    Comments

    - - +

    Exit handlers - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/hardwired-artifacts/index.html b/walk-through/hardwired-artifacts/index.html index f27671d6d86d..e83230468fc6 100644 --- a/walk-through/hardwired-artifacts/index.html +++ b/walk-through/hardwired-artifacts/index.html @@ -1,3954 +1,68 @@ - - - - - - - - - - - - - Hardwired Artifacts - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Hardwired Artifacts - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Hardwired Artifacts

    -

    You can use any container image to generate any kind of artifact. In practice, however, certain types of artifacts are very common, so there is built-in support for git, HTTP, GCS, and S3 artifacts.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: hardwired-artifact-
    -spec:
    -  entrypoint: hardwired-artifact
    -  templates:
    -  - name: hardwired-artifact
    -    inputs:
    -      artifacts:
    -      # Check out the main branch of the argo repo and place it at /src
    -      # revision can be anything that git checkout accepts: branch, commit, tag, etc.
    -      - name: argo-source
    -        path: /src
    -        git:
    -          repo: https://github.com/argoproj/argo-workflows.git
    -          revision: "main"
    -      # Download kubectl 1.8.0 and place it at /bin/kubectl
    -      - name: kubectl
    -        path: /bin/kubectl
    -        mode: 0755
    -        http:
    -          url: https://storage.googleapis.com/kubernetes-release/release/v1.8.0/bin/linux/amd64/kubectl
    -      # Copy an s3 compatible artifact repository bucket (such as AWS, GCS and MinIO) and place it at /s3
    -      - name: objects
    -        path: /s3
    -        s3:
    -          endpoint: storage.googleapis.com
    -          bucket: my-bucket-name
    -          key: path/in/bucket
    -          accessKeySecret:
    -            name: my-s3-credentials
    -            key: accessKey
    -          secretKeySecret:
    -            name: my-s3-credentials
    -            key: secretKey
    -    container:
    -      image: debian
    -      command: [sh, -c]
    -      args: ["ls -l /src /bin/kubectl /s3"]
    -
    - - - - -

    Comments

    - - +

    Hardwired Artifacts - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/hello-world/index.html b/walk-through/hello-world/index.html index 5222e5637494..7155342bede4 100644 --- a/walk-through/hello-world/index.html +++ b/walk-through/hello-world/index.html @@ -1,3959 +1,68 @@ - - - - - - - - - - - - - Hello World - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Hello World - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Hello World

    -

    Let's start by creating a very simple workflow template to echo "hello world" using the docker/whalesay container -image from Docker Hub.

    -

    You can run this directly from your shell with a simple docker command:

    -
    $ docker run docker/whalesay cowsay "hello world"
    - _____________
    -< hello world >
    - -------------
    -    \
    -     \
    -      \
    -                    ##        .
    -              ## ## ##       ==
    -           ## ## ## ##      ===
    -       /""""""""""""""""___/ ===
    -  ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ /  ===- ~~~
    -       \______ o          __/
    -        \    \        __/
    -          \____\______/
    -
    -
    -Hello from Docker!
    -This message shows that your installation appears to be working correctly.
    -
    -

    Below, we run the same container on a Kubernetes cluster using an Argo workflow template. Be sure to read the comments -as they provide useful explanations.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow                  # new type of k8s spec
    -metadata:
    -  generateName: hello-world-    # name of the workflow spec
    -spec:
    -  entrypoint: whalesay          # invoke the whalesay template
    -  templates:
    -    - name: whalesay              # name of the template
    -      container:
    -        image: docker/whalesay
    -        command: [ cowsay ]
    -        args: [ "hello world" ]
    -        resources: # limit the resources
    -          limits:
    -            memory: 32Mi
    -            cpu: 100m
    -
    -

    Argo adds a new kind of Kubernetes spec called a Workflow. The above spec contains a single template -called whalesay which runs the docker/whalesay container and invokes cowsay "hello world". The whalesay template -is the entrypoint for the spec. The entrypoint specifies the initial template that should be invoked when the workflow -spec is executed by Kubernetes. Being able to specify the entrypoint is more useful when there is more than one template -defined in the Kubernetes workflow spec. :-)

    - - - - -

    Comments

    - - +

    Hello World - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/index.html b/walk-through/index.html index e95d5d72e2a0..8162ea82343f 100644 --- a/walk-through/index.html +++ b/walk-through/index.html @@ -1,3922 +1,68 @@ - - - - - - - - - - - - - About - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + About - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    About

    -

    Argo is implemented as a Kubernetes CRD (Custom Resource Definition). As a result, Argo workflows can be managed -using kubectl and natively integrates with other Kubernetes services such as volumes, secrets, and RBAC. The new Argo -software is light-weight and installs in under a minute, and provides complete workflow features including parameter -substitution, artifacts, fixtures, loops and recursive workflows.

    -

    Dozens of examples are available in -the examples directory on GitHub.

    -

    For a complete description of the Argo workflow spec, please refer -to the spec documentation.

    -

    Progress through these examples in sequence to learn all the basics.

    -

    Start with Argo CLI.

    - - - - -

    Comments

    - - +

    About - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/kubernetes-resources/index.html b/walk-through/kubernetes-resources/index.html index 5b7df2ecc1b3..4f9813f78f82 100644 --- a/walk-through/kubernetes-resources/index.html +++ b/walk-through/kubernetes-resources/index.html @@ -1,3983 +1,68 @@ - - - - - - - - - - - - - Kubernetes Resources - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Kubernetes Resources - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Kubernetes Resources

    -

    In many cases, you will want to manage Kubernetes resources from Argo workflows. The resource template allows you to create, delete or updated any type of Kubernetes resource.

    -
    # in a workflow. The resource template type accepts any k8s manifest
    -# (including CRDs) and can perform any `kubectl` action against it (e.g. create,
    -# apply, delete, patch).
    -apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: k8s-jobs-
    -spec:
    -  entrypoint: pi-tmpl
    -  templates:
    -  - name: pi-tmpl
    -    resource:                   # indicates that this is a resource template
    -      action: create            # can be any kubectl action (e.g. create, delete, apply, patch)
    -      # The successCondition and failureCondition are optional expressions.
    -      # If failureCondition is true, the step is considered failed.
    -      # If successCondition is true, the step is considered successful.
    -      # They use kubernetes label selection syntax and can be applied against any field
    -      # of the resource (not just labels). Multiple AND conditions can be represented by comma
    -      # delimited expressions.
    -      # For more details: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
    -      successCondition: status.succeeded > 0
    -      failureCondition: status.failed > 3
    -      manifest: |               #put your kubernetes spec here
    -        apiVersion: batch/v1
    -        kind: Job
    -        metadata:
    -          generateName: pi-job-
    -        spec:
    -          template:
    -            metadata:
    -              name: pi
    -            spec:
    -              containers:
    -              - name: pi
    -                image: perl
    -                command: ["perl",  "-Mbignum=bpi", "-wle", "print bpi(2000)"]
    -              restartPolicy: Never
    -          backoffLimit: 4
    -
    -

    Note: -Currently only a single resource can be managed by a resource template so either a generateName or name must be provided in the resource's meta-data.

    -

    Resources created in this way are independent of the workflow. If you want the resource to be deleted when the workflow is deleted then you can use Kubernetes garbage collection with the workflow resource as an owner reference (example).

    -

    You can also collect data about the resource in output parameters (see more at k8s-jobs.yaml)

    -

    Note: -When patching, the resource will accept another attribute, mergeStrategy, which can either be strategic, merge, or json. If this attribute is not supplied, it will default to strategic. Keep in mind that Custom Resources cannot be patched with strategic, so a different strategy must be chosen. For example, suppose you have the CronTab CRD defined, and the following instance of a CronTab:

    -
    apiVersion: "stable.example.com/v1"
    -kind: CronTab
    -spec:
    -  cronSpec: "* * * * */5"
    -  image: my-awesome-cron-image
    -
    -

    This CronTab can be modified using the following Argo Workflow:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: k8s-patch-
    -spec:
    -  entrypoint: cront-tmpl
    -  templates:
    -  - name: cront-tmpl
    -    resource:
    -      action: patch
    -      mergeStrategy: merge                 # Must be one of [strategic merge json]
    -      manifest: |
    -        apiVersion: "stable.example.com/v1"
    -        kind: CronTab
    -        spec:
    -          cronSpec: "* * * * */10"
    -          image: my-awesome-cron-image
    -
    - - - - -

    Comments

    - - +

    Kubernetes Resources - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/loops/index.html b/walk-through/loops/index.html index 82c03707f2dc..7434c396cc69 100644 --- a/walk-through/loops/index.html +++ b/walk-through/loops/index.html @@ -1,4233 +1,68 @@ - - - - - - - - - - - - - Loops - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Loops - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - - - - +
    +
    +
    +
    - - - - - - - - -

    Loops

    -

    When writing workflows, it is often very useful to be able to iterate over a set of inputs, as this is how argo-workflows can perform loops.

    -

    There are two basic ways of running a template multiple times.

    -
      -
    • withItems takes a list of things to work on. Either
        -
      • plain, single values, which are then usable in your template as '{{item}}'
      • -
      • a JSON object where each element in the object can be addressed by it's key as '{{item.key}}'
      • -
      -
    • -
    • withParam takes a JSON array of items, and iterates over it - again the items can be objects like with withItems. This is very powerful, as you can generate the JSON in another step in your workflow, so creating a dynamic workflow.
    • -
    -

    withItems basic example

    -

    This example is the simplest. We are taking a basic list of items and iterating over it with withItems. It is limited to one varying field for each of the workflow templates instantiated.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: loops-
    -spec:
    -  entrypoint: loop-example
    -  templates:
    -  - name: loop-example
    -    steps:
    -    - - name: print-message
    -        template: whalesay
    -        arguments:
    -          parameters:
    -          - name: message
    -            value: "{{item}}"
    -        withItems:              # invoke whalesay once for each item in parallel
    -        - hello world           # item 1
    -        - goodbye world         # item 2
    -
    -  - name: whalesay
    -    inputs:
    -      parameters:
    -      - name: message
    -    container:
    -      image: docker/whalesay:latest
    -      command: [cowsay]
    -      args: ["{{inputs.parameters.message}}"]
    -
    -

    withItems more complex example

    -

    If we'd like to pass more than one piece of information in each workflow, you can instead use a JSON object for each entry in withItems and then address the elements by key, as shown in this example.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: loops-maps-
    -spec:
    -  entrypoint: loop-map-example
    -  templates:
    -  - name: loop-map-example # parameter specifies the list to iterate over
    -    steps:
    -    - - name: test-linux
    -        template: cat-os-release
    -        arguments:
    -          parameters:
    -          - name: image
    -            value: "{{item.image}}"
    -          - name: tag
    -            value: "{{item.tag}}"
    -        withItems:
    -        - { image: 'debian', tag: '9.1' }       #item set 1
    -        - { image: 'debian', tag: '8.9' }       #item set 2
    -        - { image: 'alpine', tag: '3.6' }       #item set 3
    -        - { image: 'ubuntu', tag: '17.10' }     #item set 4
    -
    -  - name: cat-os-release
    -    inputs:
    -      parameters:
    -      - name: image
    -      - name: tag
    -    container:
    -      image: "{{inputs.parameters.image}}:{{inputs.parameters.tag}}"
    -      command: [cat]
    -      args: [/etc/os-release]
    -
    -

    withParam example

    -

    This example does exactly the same job as the previous example, but using withParam to pass the information as a JSON array argument, instead of hard-coding it into the template.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: loops-param-arg-
    -spec:
    -  entrypoint: loop-param-arg-example
    -  arguments:
    -    parameters:
    -    - name: os-list                                     # a list of items
    -      value: |
    -        [
    -          { "image": "debian", "tag": "9.1" },
    -          { "image": "debian", "tag": "8.9" },
    -          { "image": "alpine", "tag": "3.6" },
    -          { "image": "ubuntu", "tag": "17.10" }
    -        ]
    -
    -  templates:
    -  - name: loop-param-arg-example
    -    inputs:
    -      parameters:
    -      - name: os-list
    -    steps:
    -    - - name: test-linux
    -        template: cat-os-release
    -        arguments:
    -          parameters:
    -          - name: image
    -            value: "{{item.image}}"
    -          - name: tag
    -            value: "{{item.tag}}"
    -        withParam: "{{inputs.parameters.os-list}}"      # parameter specifies the list to iterate over
    -
    -  # This template is the same as in the previous example
    -  - name: cat-os-release
    -    inputs:
    -      parameters:
    -      - name: image
    -      - name: tag
    -    container:
    -      image: "{{inputs.parameters.image}}:{{inputs.parameters.tag}}"
    -      command: [cat]
    -      args: [/etc/os-release]
    -
    -

    withParam example from another step in the workflow

    -

    Finally, the most powerful form of this is to generate that JSON array of objects dynamically in one step, and then pass it to the next step so that the number and values used in the second step are only calculated at runtime.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: loops-param-result-
    -spec:
    -  entrypoint: loop-param-result-example
    -  templates:
    -  - name: loop-param-result-example
    -    steps:
    -    - - name: generate
    -        template: gen-number-list
    -    # Iterate over the list of numbers generated by the generate step above
    -    - - name: sleep
    -        template: sleep-n-sec
    -        arguments:
    -          parameters:
    -          - name: seconds
    -            value: "{{item}}"
    -        withParam: "{{steps.generate.outputs.result}}"
    -
    -  # Generate a list of numbers in JSON format
    -  - name: gen-number-list
    -    script:
    -      image: python:alpine3.6
    -      command: [python]
    -      source: |
    -        import json
    -        import sys
    -        json.dump([i for i in range(20, 31)], sys.stdout)
    -
    -  - name: sleep-n-sec
    -    inputs:
    -      parameters:
    -      - name: seconds
    -    container:
    -      image: alpine:latest
    -      command: [sh, -c]
    -      args: ["echo sleeping for {{inputs.parameters.seconds}} seconds; sleep {{inputs.parameters.seconds}}; echo done"]
    -
    -

    Accessing the aggregate results of a loop

    -

    The output of all iterations can be accessed as a JSON array, once the loop is done. -The example below shows how you can read it.

    -

    Please note: the output of each iteration must be a valid JSON.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: WorkflowTemplate
    -metadata:
    -  name: loop-test
    -spec:
    -  entrypoint: main
    -  templates:
    -  - name: main
    -    steps:
    -    - - name: execute-parallel-steps
    -        template: print-json-entry
    -        arguments:
    -          parameters:
    -          - name: index
    -            value: '{{item}}'
    -        withParam: '[1, 2, 3]'
    -    - - name: call-access-aggregate-output
    -        template: access-aggregate-output
    -        arguments:
    -          parameters:
    -          - name: aggregate-results
    -            # If the value of each loop iteration isn't a valid JSON,
    -            # you get a JSON parse error:
    -            value: '{{steps.execute-parallel-steps.outputs.result}}'
    -  - name: print-json-entry
    -    inputs:
    -      parameters:
    -      - name: index
    -    # The output must be a valid JSON
    -    script:
    -      image: alpine:latest
    -      command: [sh]
    -      source: |
    -        cat <<EOF
    -        {
    -        "input": "{{inputs.parameters.index}}",
    -        "transformed-input": "{{inputs.parameters.index}}.jpeg"
    -        }
    -        EOF
    -  - name: access-aggregate-output
    -    inputs:
    -      parameters:
    -      - name: aggregate-results
    -        value: 'no-value'
    -    script:
    -      image: alpine:latest
    -      command: [sh]
    -      source: |
    -        echo 'inputs.parameters.aggregate-results: "{{inputs.parameters.aggregate-results}}"'
    -
    -

    image

    -

    The last step of the workflow above should have this output: -inputs.parameters.aggregate-results: "[{"input":"1","transformed-input":"1.jpeg"},{"input":"2","transformed-input":"2.jpeg"},{"input":"3","transformed-input":"3.jpeg"}]"

    - - - - -

    Comments

    - - +

    Loops - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/output-parameters/index.html b/walk-through/output-parameters/index.html index ab91e7272ec1..a19831537c21 100644 --- a/walk-through/output-parameters/index.html +++ b/walk-through/output-parameters/index.html @@ -1,4048 +1,68 @@ - - - - - - - - - - - - - Output Parameters - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Output Parameters - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Output Parameters

    -

    Output parameters provide a general mechanism to use the result of a step as a parameter (and not just as an artifact). This allows you to use the result from any type of step, not just a script, for conditional tests, loops, and arguments. Output parameters work similarly to script result except that the value of the output parameter is set to the contents of a generated file rather than the contents of stdout.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: output-parameter-
    -spec:
    -  entrypoint: output-parameter
    -  templates:
    -  - name: output-parameter
    -    steps:
    -    - - name: generate-parameter
    -        template: whalesay
    -    - - name: consume-parameter
    -        template: print-message
    -        arguments:
    -          parameters:
    -          # Pass the hello-param output from the generate-parameter step as the message input to print-message
    -          - name: message
    -            value: "{{steps.generate-parameter.outputs.parameters.hello-param}}"
    -
    -  - name: whalesay
    -    container:
    -      image: docker/whalesay:latest
    -      command: [sh, -c]
    -      args: ["echo -n hello world > /tmp/hello_world.txt"]  # generate the content of hello_world.txt
    -    outputs:
    -      parameters:
    -      - name: hello-param  # name of output parameter
    -        valueFrom:
    -          path: /tmp/hello_world.txt # set the value of hello-param to the contents of this hello-world.txt
    -
    -  - name: print-message
    -    inputs:
    -      parameters:
    -      - name: message
    -    container:
    -      image: docker/whalesay:latest
    -      command: [cowsay]
    -      args: ["{{inputs.parameters.message}}"]
    -
    -

    DAG templates use the tasks prefix to refer to another task, for example {{tasks.generate-parameter.outputs.parameters.hello-param}}.

    -

    result output parameter

    -

    The result output parameter captures standard output. -It is accessible from the outputs map: outputs.result. -Only 256 kb of the standard output stream will be captured.

    -

    Scripts

    -

    Outputs of a script are assigned to standard output and captured in the result parameter. More details here.

    -

    Containers

    -

    Container steps and tasks also have their standard output captured in the result parameter. -Given a task, called log-int, result would then be accessible as {{ tasks.log-int.outputs.result }}. If using steps, substitute tasks for steps: {{ steps.log-int.outputs.result }}.

    - - - - -

    Comments

    - - +

    Output Parameters - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/parameters/index.html b/walk-through/parameters/index.html index 1aaaad7c5f45..d808431d98fb 100644 --- a/walk-through/parameters/index.html +++ b/walk-through/parameters/index.html @@ -1,3981 +1,68 @@ - - - - - - - - - - - - - Parameters - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Parameters - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Parameters

    -

    Let's look at a slightly more complex workflow spec with parameters.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: hello-world-parameters-
    -spec:
    -  # invoke the whalesay template with
    -  # "hello world" as the argument
    -  # to the message parameter
    -  entrypoint: whalesay
    -  arguments:
    -    parameters:
    -    - name: message
    -      value: hello world
    -
    -  templates:
    -  - name: whalesay
    -    inputs:
    -      parameters:
    -      - name: message       # parameter declaration
    -    container:
    -      # run cowsay with that message input parameter as args
    -      image: docker/whalesay
    -      command: [cowsay]
    -      args: ["{{inputs.parameters.message}}"]
    -
    -

    This time, the whalesay template takes an input parameter named message that is passed as the args to the cowsay command. In order to reference parameters (e.g., "{{inputs.parameters.message}}"), the parameters must be enclosed in double quotes to escape the curly braces in YAML.

    -

    The argo CLI provides a convenient way to override parameters used to invoke the entrypoint. For example, the following command would bind the message parameter to "goodbye world" instead of the default "hello world".

    -
    argo submit arguments-parameters.yaml -p message="goodbye world"
    -
    -

    In case of multiple parameters that can be overridden, the argo CLI provides a command to load parameters files in YAML or JSON format. Here is an example of that kind of parameter file:

    -
    message: goodbye world
    -
    -

    To run use following command:

    -
    argo submit arguments-parameters.yaml --parameter-file params.yaml
    -
    -

    Command-line parameters can also be used to override the default entrypoint and invoke any template in the workflow spec. For example, if you add a new version of the whalesay template called whalesay-caps but you don't want to change the default entrypoint, you can invoke this from the command line as follows:

    -
    argo submit arguments-parameters.yaml --entrypoint whalesay-caps
    -
    -

    By using a combination of the --entrypoint and -p parameters, you can call any template in the workflow spec with any parameter that you like.

    -

    The values set in the spec.arguments.parameters are globally scoped and can be accessed via {{workflow.parameters.parameter_name}}. This can be useful to pass information to multiple steps in a workflow. For example, if you wanted to run your workflows with different logging levels that are set in the environment of each container, you could have a YAML file similar to this one:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: global-parameters-
    -spec:
    -  entrypoint: A
    -  arguments:
    -    parameters:
    -    - name: log-level
    -      value: INFO
    -
    -  templates:
    -  - name: A
    -    container:
    -      image: containerA
    -      env:
    -      - name: LOG_LEVEL
    -        value: "{{workflow.parameters.log-level}}"
    -      command: [runA]
    -  - name: B
    -    container:
    -      image: containerB
    -      env:
    -      - name: LOG_LEVEL
    -        value: "{{workflow.parameters.log-level}}"
    -      command: [runB]
    -
    -

    In this workflow, both steps A and B would have the same log-level set to INFO and can easily be changed between workflow submissions using the -p flag.

    - - - - -

    Comments

    - - +

    Parameters - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/recursion/index.html b/walk-through/recursion/index.html index 4900209612fc..f27a7a961be7 100644 --- a/walk-through/recursion/index.html +++ b/walk-through/recursion/index.html @@ -1,3973 +1,68 @@ - - - - - - - - - - - - - Recursion - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Recursion - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Recursion

    -

    Templates can recursively invoke each other! In this variation of the above coin-flip template, we continue to flip coins until it comes up heads.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: coinflip-recursive-
    -spec:
    -  entrypoint: coinflip
    -  templates:
    -  - name: coinflip
    -    steps:
    -    # flip a coin
    -    - - name: flip-coin
    -        template: flip-coin
    -    # evaluate the result in parallel
    -    - - name: heads
    -        template: heads                 # call heads template if "heads"
    -        when: "{{steps.flip-coin.outputs.result}} == heads"
    -      - name: tails                     # keep flipping coins if "tails"
    -        template: coinflip
    -        when: "{{steps.flip-coin.outputs.result}} == tails"
    -
    -  - name: flip-coin
    -    script:
    -      image: python:alpine3.6
    -      command: [python]
    -      source: |
    -        import random
    -        result = "heads" if random.randint(0,1) == 0 else "tails"
    -        print(result)
    -
    -  - name: heads
    -    container:
    -      image: alpine:3.6
    -      command: [sh, -c]
    -      args: ["echo \"it was heads\""]
    -
    -

    Here's the result of a couple of runs of coin-flip for comparison.

    -
    argo get coinflip-recursive-tzcb5
    -
    -STEP                         PODNAME                              MESSAGE
    -  coinflip-recursive-vhph5
    - ├───✔ flip-coin             coinflip-recursive-vhph5-2123890397
    - └─┬─✔ heads                 coinflip-recursive-vhph5-128690560
    -   └─○ tails
    -
    -STEP                          PODNAME                              MESSAGE
    -  coinflip-recursive-tzcb5
    - ├───✔ flip-coin              coinflip-recursive-tzcb5-322836820
    - └─┬─○ heads
    -   └─✔ tails
    -     ├───✔ flip-coin          coinflip-recursive-tzcb5-1863890320
    -     └─┬─○ heads
    -       └─✔ tails
    -         ├───✔ flip-coin      coinflip-recursive-tzcb5-1768147140
    -         └─┬─○ heads
    -           └─✔ tails
    -             ├───✔ flip-coin  coinflip-recursive-tzcb5-4080411136
    -             └─┬─✔ heads      coinflip-recursive-tzcb5-4080323273
    -               └─○ tails
    -
    -

    In the first run, the coin immediately comes up heads and we stop. In the second run, the coin comes up tail three times before it finally comes up heads and we stop.

    - - - - -

    Comments

    - - +

    Recursion - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/retrying-failed-or-errored-steps/index.html b/walk-through/retrying-failed-or-errored-steps/index.html index 2bef113aaa5b..a3804e12110e 100644 --- a/walk-through/retrying-failed-or-errored-steps/index.html +++ b/walk-through/retrying-failed-or-errored-steps/index.html @@ -1,3944 +1,68 @@ - - - - - - - - - - - - - Retrying Failed or Errored Steps - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Retrying Failed or Errored Steps - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Retrying Failed or Errored Steps

    -

    You can specify a retryStrategy that will dictate how failed or errored steps are retried:

    -
    # This example demonstrates the use of retry back offs
    -apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: retry-backoff-
    -spec:
    -  entrypoint: retry-backoff
    -  templates:
    -  - name: retry-backoff
    -    retryStrategy:
    -      limit: 10
    -      retryPolicy: "Always"
    -      backoff:
    -        duration: "1"      # Must be a string. Default unit is seconds. Could also be a Duration, e.g.: "2m", "6h", "1d"
    -        factor: 2
    -        maxDuration: "1m"  # Must be a string. Default unit is seconds. Could also be a Duration, e.g.: "2m", "6h", "1d"
    -      affinity:
    -        nodeAntiAffinity: {}
    -    container:
    -      image: python:alpine3.6
    -      command: ["python", -c]
    -      # fail with a 66% probability
    -      args: ["import random; import sys; exit_code = random.choice([0, 1, 1]); sys.exit(exit_code)"]
    -
    -
      -
    • limit is the maximum number of times the container will be retried.
    • -
    • retryPolicy specifies if a container will be retried on failure, error, both, or only transient errors (e.g. i/o or TLS handshake timeout). "Always" retries on both errors and failures. Also available: OnFailure (default), "OnError", and "OnTransientError" (available after v3.0.0-rc2).
    • -
    • backoff is an exponential back-off
    • -
    • nodeAntiAffinity prevents running steps on the same host. Current implementation allows only empty nodeAntiAffinity (i.e. nodeAntiAffinity: {}) and by default it uses label kubernetes.io/hostname as the selector.
    • -
    -

    Providing an empty retryStrategy (i.e. retryStrategy: {}) will cause a container to retry until completion.

    - - - - -

    Comments

    - - +

    Retrying Failed or Errored Steps - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/scripts-and-results/index.html b/walk-through/scripts-and-results/index.html index 9638d84bc42f..d9b5a626b9eb 100644 --- a/walk-through/scripts-and-results/index.html +++ b/walk-through/scripts-and-results/index.html @@ -1,3966 +1,68 @@ - - - - - - - - - - - - - Scripts And Results - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Scripts And Results - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Scripts And Results

    -

    Often, we just want a template that executes a script specified as a here-script (also known as a here document) in the workflow spec. This example shows how to do that:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: scripts-bash-
    -spec:
    -  entrypoint: bash-script-example
    -  templates:
    -  - name: bash-script-example
    -    steps:
    -    - - name: generate
    -        template: gen-random-int-bash
    -    - - name: print
    -        template: print-message
    -        arguments:
    -          parameters:
    -          - name: message
    -            value: "{{steps.generate.outputs.result}}"  # The result of the here-script
    -
    -  - name: gen-random-int-bash
    -    script:
    -      image: debian:9.4
    -      command: [bash]
    -      source: |                                         # Contents of the here-script
    -        cat /dev/urandom | od -N2 -An -i | awk -v f=1 -v r=100 '{printf "%i\n", f + r * $1 / 65536}'
    -
    -  - name: gen-random-int-python
    -    script:
    -      image: python:alpine3.6
    -      command: [python]
    -      source: |
    -        import random
    -        i = random.randint(1, 100)
    -        print(i)
    -
    -  - name: gen-random-int-javascript
    -    script:
    -      image: node:9.1-alpine
    -      command: [node]
    -      source: |
    -        var rand = Math.floor(Math.random() * 100);
    -        console.log(rand);
    -
    -  - name: print-message
    -    inputs:
    -      parameters:
    -      - name: message
    -    container:
    -      image: alpine:latest
    -      command: [sh, -c]
    -      args: ["echo result was: {{inputs.parameters.message}}"]
    -
    -

    The script keyword allows the specification of the script body using the source tag. This creates a temporary file containing the script body and then passes the name of the temporary file as the final parameter to command, which should be an interpreter that executes the script body.

    -

    The use of the script feature also assigns the standard output of running the script to a special output parameter named result. This allows you to use the result of running the script itself in the rest of the workflow spec. In this example, the result is simply echoed by the print-message template.

    - - - - -

    Comments

    - - +

    Scripts And Results - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/secrets/index.html b/walk-through/secrets/index.html index eb9db8e6f4af..c994d5348e57 100644 --- a/walk-through/secrets/index.html +++ b/walk-through/secrets/index.html @@ -1,3948 +1,68 @@ - - - - - - - - - - - - - Secrets - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Secrets - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Secrets

    -

    Argo supports the same secrets syntax and mechanisms as Kubernetes Pod specs, which allows access to secrets as environment variables or volume mounts. See the Kubernetes documentation for more information.

    -
    # To run this example, first create the secret by running:
    -# kubectl create secret generic my-secret --from-literal=mypassword=S00perS3cretPa55word
    -apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: secret-example-
    -spec:
    -  entrypoint: whalesay
    -  # To access secrets as files, add a volume entry in spec.volumes[] and
    -  # then in the container template spec, add a mount using volumeMounts.
    -  volumes:
    -  - name: my-secret-vol
    -    secret:
    -      secretName: my-secret     # name of an existing k8s secret
    -  templates:
    -  - name: whalesay
    -    container:
    -      image: alpine:3.7
    -      command: [sh, -c]
    -      args: ['
    -        echo "secret from env: $MYSECRETPASSWORD";
    -        echo "secret from file: `cat /secret/mountpath/mypassword`"
    -      ']
    -      # To access secrets as environment variables, use the k8s valueFrom and
    -      # secretKeyRef constructs.
    -      env:
    -      - name: MYSECRETPASSWORD  # name of env var
    -        valueFrom:
    -          secretKeyRef:
    -            name: my-secret     # name of an existing k8s secret
    -            key: mypassword     # 'key' subcomponent of the secret
    -      volumeMounts:
    -      - name: my-secret-vol     # mount file containing secret at /secret/mountpath
    -        mountPath: "/secret/mountpath"
    -
    - - - - -

    Comments

    - - +

    Secrets - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/sidecars/index.html b/walk-through/sidecars/index.html index afde8b609cda..517a45693f06 100644 --- a/walk-through/sidecars/index.html +++ b/walk-through/sidecars/index.html @@ -1,3933 +1,68 @@ - - - - - - - - - - - - - Sidecars - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Sidecars - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Sidecars

    -

    A sidecar is another container that executes concurrently in the same pod as the main container and is useful in creating multi-container pods.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: sidecar-nginx-
    -spec:
    -  entrypoint: sidecar-nginx-example
    -  templates:
    -  - name: sidecar-nginx-example
    -    container:
    -      image: appropriate/curl
    -      command: [sh, -c]
    -      # Try to read from nginx web server until it comes up
    -      args: ["until `curl -G 'http://127.0.0.1/' >& /tmp/out`; do echo sleep && sleep 1; done && cat /tmp/out"]
    -    # Create a simple nginx web server
    -    sidecars:
    -    - name: nginx
    -      image: nginx:1.13
    -      command: [nginx, -g, daemon off;]
    -
    -

    In the above example, we create a sidecar container that runs Nginx as a simple web server. The order in which containers come up is random, so in this example the main container polls the Nginx container until it is ready to service requests. This is a good design pattern when designing multi-container systems: always wait for any services you need to come up before running your main code.

    - - - - -

    Comments

    - - +

    Sidecars - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/steps/index.html b/walk-through/steps/index.html index a57d73addae9..7a1ae551ba73 100644 --- a/walk-through/steps/index.html +++ b/walk-through/steps/index.html @@ -1,3962 +1,68 @@ - - - - - - - - - - - - - Steps - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Steps - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Steps

    -

    In this example, we'll see how to create multi-step workflows, how to define more than one template in a workflow spec, and how to create nested workflows. Be sure to read the comments as they provide useful explanations.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: steps-
    -spec:
    -  entrypoint: hello-hello-hello
    -
    -  # This spec contains two templates: hello-hello-hello and whalesay
    -  templates:
    -  - name: hello-hello-hello
    -    # Instead of just running a container
    -    # This template has a sequence of steps
    -    steps:
    -    - - name: hello1            # hello1 is run before the following steps
    -        template: whalesay
    -        arguments:
    -          parameters:
    -          - name: message
    -            value: "hello1"
    -    - - name: hello2a           # double dash => run after previous step
    -        template: whalesay
    -        arguments:
    -          parameters:
    -          - name: message
    -            value: "hello2a"
    -      - name: hello2b           # single dash => run in parallel with previous step
    -        template: whalesay
    -        arguments:
    -          parameters:
    -          - name: message
    -            value: "hello2b"
    -
    -  # This is the same template as from the previous example
    -  - name: whalesay
    -    inputs:
    -      parameters:
    -      - name: message
    -    container:
    -      image: docker/whalesay
    -      command: [cowsay]
    -      args: ["{{inputs.parameters.message}}"]
    -
    -

    The above workflow spec prints three different flavors of "hello". The hello-hello-hello template consists of three steps. The first step named hello1 will be run in sequence whereas the next two steps named hello2a and hello2b will be run in parallel with each other. Using the argo CLI command, we can graphically display the execution history of this workflow spec, which shows that the steps named hello2a and hello2b ran in parallel with each other.

    -
    STEP            TEMPLATE           PODNAME                 DURATION  MESSAGE
    -  steps-z2zdn  hello-hello-hello
    - ├───✔ hello1   whalesay           steps-z2zdn-27420706    2s
    - └─┬─✔ hello2a  whalesay           steps-z2zdn-2006760091  3s
    -   └─✔ hello2b  whalesay           steps-z2zdn-2023537710  3s
    -
    - - - - -

    Comments

    - - +

    Steps - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/suspending/index.html b/walk-through/suspending/index.html index 483fb036fbcb..e06e1b289218 100644 --- a/walk-through/suspending/index.html +++ b/walk-through/suspending/index.html @@ -1,3951 +1,68 @@ - - - - - - - - - - - - - Suspending - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Suspending - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Suspending

    -

    Workflows can be suspended by

    -
    argo suspend WORKFLOW
    -
    -

    Or by specifying a suspend step on the workflow:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: suspend-template-
    -spec:
    -  entrypoint: suspend
    -  templates:
    -  - name: suspend
    -    steps:
    -    - - name: build
    -        template: whalesay
    -    - - name: approve
    -        template: approve
    -    - - name: delay
    -        template: delay
    -    - - name: release
    -        template: whalesay
    -
    -  - name: approve
    -    suspend: {}
    -
    -  - name: delay
    -    suspend:
    -      duration: "20"    # Must be a string. Default unit is seconds. Could also be a Duration, e.g.: "2m", "6h"
    -
    -  - name: whalesay
    -    container:
    -      image: docker/whalesay
    -      command: [cowsay]
    -      args: ["hello world"]
    -
    -

    Once suspended, a Workflow will not schedule any new steps until it is resumed. It can be resumed manually by

    -
    argo resume WORKFLOW
    -
    -

    Or automatically with a duration limit as the example above.

    - - - - -

    Comments

    - - +

    Suspending - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/the-structure-of-workflow-specs/index.html b/walk-through/the-structure-of-workflow-specs/index.html index 8580596a1572..f5c7bebf4a0f 100644 --- a/walk-through/the-structure-of-workflow-specs/index.html +++ b/walk-through/the-structure-of-workflow-specs/index.html @@ -1,3937 +1,68 @@ - - - - - - - - - - - - - The Structure of Workflow Specs - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + The Structure of Workflow Specs - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    The Structure of Workflow Specs

    -

    We now know enough about the basic components of a workflow spec. To review its basic structure:

    -
      -
    • Kubernetes header including meta-data
    • -
    • -

      Spec body

      -
        -
      • Entrypoint invocation with optional arguments
      • -
      • List of template definitions
      • -
      -
    • -
    • -

      For each template definition

      -
        -
      • Name of the template
      • -
      • Optionally a list of inputs
      • -
      • Optionally a list of outputs
      • -
      • Container invocation (leaf template) or a list of steps
          -
        • For each step, a template invocation
        • -
        -
      • -
      -
    • -
    -

    To summarize, workflow specs are composed of a set of Argo templates where each template consists of an optional input section, an optional output section and either a container invocation or a list of steps where each step invokes another template.

    -

    Note that the container section of the workflow spec will accept the same options as the container section of a pod spec, including but not limited to environment variables, secrets, and volume mounts. Similarly, for volume claims and volumes.

    - - - - -

    Comments

    - - +

    The Structure of Workflow Specs - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/timeouts/index.html b/walk-through/timeouts/index.html index 9cf43218f65d..a30d2ec49006 100644 --- a/walk-through/timeouts/index.html +++ b/walk-through/timeouts/index.html @@ -1,3942 +1,68 @@ - - - - - - - - - - - - - Timeouts - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Timeouts - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Timeouts

    -

    You can use the field activeDeadlineSeconds to limit the elapsed time for a workflow:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: timeouts-
    -spec:
    -  activeDeadlineSeconds: 10 # terminate workflow after 10 seconds
    -  entrypoint: sleep
    -  templates:
    -  - name: sleep
    -    container:
    -      image: alpine:latest
    -      command: [sh, -c]
    -      args: ["echo sleeping for 1m; sleep 60; echo done"]
    -
    -

    You can limit the elapsed time for a specific template as well:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: timeouts-
    -spec:
    -  entrypoint: sleep
    -  templates:
    -  - name: sleep
    -    activeDeadlineSeconds: 10 # terminate container template after 10 seconds
    -    container:
    -      image: alpine:latest
    -      command: [sh, -c]
    -      args: ["echo sleeping for 1m; sleep 60; echo done"]
    -
    - - - - -

    Comments

    - - +

    Timeouts - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/walk-through/volumes/index.html b/walk-through/volumes/index.html index 41e45ad3103f..ef085c40c404 100644 --- a/walk-through/volumes/index.html +++ b/walk-through/volumes/index.html @@ -1,4095 +1,68 @@ - - - - - - - - - - - - - Volumes - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Volumes - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Volumes

    -

    The following example dynamically creates a volume and then uses the volume in a two step workflow.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: volumes-pvc-
    -spec:
    -  entrypoint: volumes-pvc-example
    -  volumeClaimTemplates:                 # define volume, same syntax as k8s Pod spec
    -  - metadata:
    -      name: workdir                     # name of volume claim
    -    spec:
    -      accessModes: [ "ReadWriteOnce" ]
    -      resources:
    -        requests:
    -          storage: 1Gi                  # Gi => 1024 * 1024 * 1024
    -
    -  templates:
    -  - name: volumes-pvc-example
    -    steps:
    -    - - name: generate
    -        template: whalesay
    -    - - name: print
    -        template: print-message
    -
    -  - name: whalesay
    -    container:
    -      image: docker/whalesay:latest
    -      command: [sh, -c]
    -      args: ["echo generating message in volume; cowsay hello world | tee /mnt/vol/hello_world.txt"]
    -      # Mount workdir volume at /mnt/vol before invoking docker/whalesay
    -      volumeMounts:                     # same syntax as k8s Pod spec
    -      - name: workdir
    -        mountPath: /mnt/vol
    -
    -  - name: print-message
    -    container:
    -      image: alpine:latest
    -      command: [sh, -c]
    -      args: ["echo getting message from volume; find /mnt/vol; cat /mnt/vol/hello_world.txt"]
    -      # Mount workdir volume at /mnt/vol before invoking docker/whalesay
    -      volumeMounts:                     # same syntax as k8s Pod spec
    -      - name: workdir
    -        mountPath: /mnt/vol
    -
    -

    Volumes are a very useful way to move large amounts of data from one step in a workflow to another. Depending on the system, some volumes may be accessible concurrently from multiple steps.

    -

    In some cases, you want to access an already existing volume rather than creating/destroying one dynamically.

    -
    # Define Kubernetes PVC
    -kind: PersistentVolumeClaim
    -apiVersion: v1
    -metadata:
    -  name: my-existing-volume
    -spec:
    -  accessModes: [ "ReadWriteOnce" ]
    -  resources:
    -    requests:
    -      storage: 1Gi
    -
    ----
    -apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: volumes-existing-
    -spec:
    -  entrypoint: volumes-existing-example
    -  volumes:
    -  # Pass my-existing-volume as an argument to the volumes-existing-example template
    -  # Same syntax as k8s Pod spec
    -  - name: workdir
    -    persistentVolumeClaim:
    -      claimName: my-existing-volume
    -
    -  templates:
    -  - name: volumes-existing-example
    -    steps:
    -    - - name: generate
    -        template: whalesay
    -    - - name: print
    -        template: print-message
    -
    -  - name: whalesay
    -    container:
    -      image: docker/whalesay:latest
    -      command: [sh, -c]
    -      args: ["echo generating message in volume; cowsay hello world | tee /mnt/vol/hello_world.txt"]
    -      volumeMounts:
    -      - name: workdir
    -        mountPath: /mnt/vol
    -
    -  - name: print-message
    -    container:
    -      image: alpine:latest
    -      command: [sh, -c]
    -      args: ["echo getting message from volume; find /mnt/vol; cat /mnt/vol/hello_world.txt"]
    -      volumeMounts:
    -      - name: workdir
    -        mountPath: /mnt/vol
    -
    -

    It's also possible to declare existing volumes at the template level, instead of the workflow level. -Workflows can generate volumes using a resource step.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: template-level-volume-
    -spec:
    -  entrypoint: generate-and-use-volume
    -  templates:
    -  - name: generate-and-use-volume
    -    steps:
    -    - - name: generate-volume
    -        template: generate-volume
    -        arguments:
    -          parameters:
    -            - name: pvc-size
    -              # In a real-world example, this could be generated by a previous workflow step.
    -              value: '1Gi'
    -    - - name: generate
    -        template: whalesay
    -        arguments:
    -          parameters:
    -            - name: pvc-name
    -              value: '{{steps.generate-volume.outputs.parameters.pvc-name}}'
    -    - - name: print
    -        template: print-message
    -        arguments:
    -          parameters:
    -            - name: pvc-name
    -              value: '{{steps.generate-volume.outputs.parameters.pvc-name}}'
    -
    -  - name: generate-volume
    -    inputs:
    -      parameters:
    -        - name: pvc-size
    -    resource:
    -      action: create
    -      setOwnerReference: true
    -      manifest: |
    -        apiVersion: v1
    -        kind: PersistentVolumeClaim
    -        metadata:
    -          generateName: pvc-example-
    -        spec:
    -          accessModes: ['ReadWriteOnce', 'ReadOnlyMany']
    -          resources:
    -            requests:
    -              storage: '{{inputs.parameters.pvc-size}}'
    -    outputs:
    -      parameters:
    -        - name: pvc-name
    -          valueFrom:
    -            jsonPath: '{.metadata.name}'
    -
    -  - name: whalesay
    -    inputs:
    -      parameters:
    -        - name: pvc-name
    -    volumes:
    -      - name: workdir
    -        persistentVolumeClaim:
    -          claimName: '{{inputs.parameters.pvc-name}}'
    -    container:
    -      image: docker/whalesay:latest
    -      command: [sh, -c]
    -      args: ["echo generating message in volume; cowsay hello world | tee /mnt/vol/hello_world.txt"]
    -      volumeMounts:
    -      - name: workdir
    -        mountPath: /mnt/vol
    -
    -  - name: print-message
    -    inputs:
    -        parameters:
    -          - name: pvc-name
    -    volumes:
    -      - name: workdir
    -        persistentVolumeClaim:
    -          claimName: '{{inputs.parameters.pvc-name}}'
    -    container:
    -      image: alpine:latest
    -      command: [sh, -c]
    -      args: ["echo getting message from volume; find /mnt/vol; cat /mnt/vol/hello_world.txt"]
    -      volumeMounts:
    -      - name: workdir
    -        mountPath: /mnt/vol
    -
    - - - - -

    Comments

    - - +

    Volumes - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/webhooks/index.html b/webhooks/index.html index 9078184f9226..c10427025581 100644 --- a/webhooks/index.html +++ b/webhooks/index.html @@ -1,3931 +1,68 @@ - - - - - - - - - - - - - Webhooks - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Webhooks - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Webhooks

    -
    -

    v2.11 and after

    -
    -

    Many clients can send events via the events API endpoint using a standard authorization header. However, for clients that are unable to do so (e.g. because they use signature verification as proof of origin), additional configuration is required.

    -

    In the namespace that will receive the event, create access token resources for your client:

    -
      -
    • A role with permissions to get workflow templates and to create a workflow: example
    • -
    • A service account for the client: example.
    • -
    • A binding of the account to the role: example
    • -
    -

    Additionally create:

    -
      -
    • A secret named argo-workflows-webhook-clients listing the service accounts: example
    • -
    -

    The secret argo-workflows-webhook-clients tells Argo:

    -
      -
    • What type of webhook the account can be used for, e.g. github.
    • -
    • What "secret" that webhook is configured for, e.g. in your Github settings page.
    • -
    - - - - -

    Comments

    - - +

    Webhooks - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/widgets/index.html b/widgets/index.html index a468a50e3185..322e90651ef8 100644 --- a/widgets/index.html +++ b/widgets/index.html @@ -1,3920 +1,68 @@ - - - - - - - - - - - - - Widgets - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Widgets - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Widgets

    -
    -

    v3.0 and after

    -
    -

    Widgets are intended to be embedded into other applications using inline frames (iframe). This may not work with your configuration. You may need to:

    -
      -
    • Run the Argo Server with an account that can read workflows. That can be done using --auth-mode=server and configuring the argo-server service account.
    • -
    • Run the Argo Server with --x-frame-options=SAMEORIGIN or --x-frame-options=.
    • -
    - - - - -

    Comments

    - - +

    Widgets - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/windows/index.html b/windows/index.html index d56651bb9f0b..391a1c07077e 100644 --- a/windows/index.html +++ b/windows/index.html @@ -1,4123 +1,68 @@ - - - - - - - - - - - - - Windows Container Support - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Windows Container Support - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - - - - +
    +
    +
    +
    - - - - - - - - -

    Windows Container Support

    -

    The Argo server and the workflow controller currently only run on Linux. The workflow executor however also runs on Windows nodes, meaning you can use Windows containers inside your workflows! Here are the steps to get started.

    -

    Requirements

    -
      -
    • Kubernetes 1.14 or later, supporting Windows nodes
    • -
    • Hybrid cluster containing Linux and Windows nodes like described in the Kubernetes docs
    • -
    • Argo configured and running like described here
    • -
    -

    Schedule workflows with Windows containers

    -

    If you're running workflows in your hybrid Kubernetes cluster, always make sure to include a nodeSelector to run the steps on the correct host OS:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: hello-windows-
    -spec:
    -  entrypoint: hello-win
    -  templates:
    -    - name: hello-win
    -      nodeSelector:
    -        kubernetes.io/os: windows    # specify the OS your step should run on
    -      container:
    -        image: mcr.microsoft.com/windows/nanoserver:1809
    -        command: ["cmd", "/c"]
    -        args: ["echo", "Hello from Windows Container!"]
    -
    -

    You can run this example and get the logs:

    -
    $ argo submit --watch https://raw.githubusercontent.com/argoproj/argo-workflows/main/examples/hello-windows.yaml
    -$ argo logs hello-windows-s9kk5
    -hello-windows-s9kk5: "Hello from Windows Container!"
    -
    -

    Schedule hybrid workflows

    -

    You can also run different steps on different host operating systems. This can for example be very helpful when you need to compile your application on Windows and Linux.

    -

    An example workflow can look like the following:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: hello-hybrid-
    -spec:
    -  entrypoint: mytemplate
    -  templates:
    -    - name: mytemplate
    -      steps:
    -        - - name: step1
    -            template: hello-win
    -        - - name: step2
    -            template: hello-linux
    -
    -    - name: hello-win
    -      nodeSelector:
    -        kubernetes.io/os: windows
    -      container:
    -        image: mcr.microsoft.com/windows/nanoserver:1809
    -        command: ["cmd", "/c"]
    -        args: ["echo", "Hello from Windows Container!"]
    -    - name: hello-linux
    -      nodeSelector:
    -        kubernetes.io/os: linux
    -      container:
    -        image: alpine
    -        command: [echo]
    -        args: ["Hello from Linux Container!"]
    -
    -

    Again, you can run this example and get the logs:

    -
    $ argo submit --watch https://raw.githubusercontent.com/argoproj/argo-workflows/main/examples/hello-hybrid.yaml
    -$ argo logs hello-hybrid-plqpp
    -hello-hybrid-plqpp-1977432187: "Hello from Windows Container!"
    -hello-hybrid-plqpp-764774907: Hello from Linux Container!
    -
    -

    Artifact mount path

    -

    Artifacts work mostly the same way as on Linux. All paths get automatically mapped to the C: drive. For example:

    -
     # ...
    -    - name: print-message
    -      inputs:
    -        artifacts:
    -          # unpack the message input artifact
    -          # and put it at C:\message
    -          - name: message
    -            path: "/message" # gets mapped to C:\message
    -      nodeSelector:
    -        kubernetes.io/os: windows
    -      container:
    -        image: mcr.microsoft.com/windows/nanoserver:1809
    -        command: ["cmd", "/c"]
    -        args: ["dir C:\\message"]   # List the C:\message directory
    -
    -

    Remember that volume mounts on Windows can only target a directory in the container, and not an individual file.

    -

    Limitations

    -
      -
    • Sharing process namespaces doesn't work on Windows so you can't use the Process Namespace Sharing (PNS) workflow executor.
    • -
    • The executor Windows container is built using Nano Server as the base image. Running a newer windows version (e.g. 1909) is currently not confirmed to be working. If this is required, you need to build the executor container yourself by first adjusting the base image.
    • -
    -

    Building the workflow executor image for Windows

    -

    To build the workflow executor image for Windows you need a Windows machine running Windows Server 2019 with Docker installed like described in the docs.

    -

    You then clone the project and run the Docker build with the Dockerfile for Windows and argoexec as a target:

    -
    git clone https://github.com/argoproj/argo-workflows.git
    -cd argo
    -docker build -t myargoexec -f .\Dockerfile.windows --target argoexec .
    -
    - - - - -

    Comments

    - - +

    Windows Container Support - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/work-avoidance/index.html b/work-avoidance/index.html index 1f6e18cd7d15..c4fff260777d 100644 --- a/work-avoidance/index.html +++ b/work-avoidance/index.html @@ -1,3942 +1,68 @@ - - - - - - - - - - - - - Work Avoidance - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Work Avoidance - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Work Avoidance

    -
    -

    v2.9 and after

    -
    -

    You can make workflows faster and more robust by employing work avoidance. A workflow that utilizes this is simply a workflow containing steps that do not run if the work has already been done.

    -

    This is a technique is similar to memoization. Work avoidance is totally in your control and you make the decisions as to have to skip the work. Memoization is a feature of Argo Workflows to automatically skip steps which generate outputs. Prior to version 3.5 this required outputs to be specified, but you can use memoization for all steps and tasks in version 3.5 or later.

    -

    This simplest way to do this is to use marker files.

    -

    Use cases:

    -
      -
    • An expensive step appears across multiple workflows - you want to avoid repeating them.
    • -
    • A workflow has unreliable tasks - you want to be able to resubmit the workflow.
    • -
    -

    A marker file is a file that indicates the work has already been done. Before doing the work you check to see if the marker has already been done:

    -
    if [ -e /work/markers/name-of-task ]; then
    -    echo "work already done"
    -    exit 0
    -fi
    -echo "working very hard"
    -touch /work/markers/name-of-task
    -
    -

    Choose a name for the file that is unique for the task, e.g. the template name and all the parameters:

    -
    touch /work/markers/$(date +%Y-%m-%d)-echo-{{inputs.parameters.num}}
    -
    -

    You need to store the marker files between workflows and this can be achieved using a PVC and optional input artifact.

    -

    This complete work avoidance example has the following:

    -
      -
    • A PVC to store the markers on.
    • -
    • A load-markers step that loads the marker files from artifact storage.
    • -
    • Multiple echo tasks that avoid work using marker files.
    • -
    • A save-markers exit handler to save the marker files, even if they are not needed.
    • -
    - - - - -

    Comments

    - - +

    Work Avoidance - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/workflow-archive/index.html b/workflow-archive/index.html index 2a1f00fe957f..b1873910135c 100644 --- a/workflow-archive/index.html +++ b/workflow-archive/index.html @@ -1,4134 +1,68 @@ - - - - - - - - - - - - - Workflow Archive - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Workflow Archive - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Workflow Archive

    -
    -

    v2.5 and after

    -
    -

    If you want to keep completed workflows for a long time, you can use the workflow archive to save them in a Postgres or MySQL (>= 5.7.8) database. -The workflow archive stores the status of the workflow, which pods have been executed, what was the result etc. -The job logs of the workflow pods will not be archived. -If you need to save the logs of the pods, you must setup an artifact repository according to this doc.

    -

    The quick-start deployment includes a Postgres database server. -In this case the workflow archive is already enabled. -Such a deployment is convenient for test environments, but in a production environment you must use a production quality database service.

    -

    Enabling Workflow Archive

    -

    To enable archiving of the workflows, you must configure database parameters in the persistence section of your configuration and set archive: to true.

    -

    Example:

    -
    persistence: 
    -  archive: true
    -  postgresql:
    -    host: localhost
    -    port: 5432
    -    database: postgres
    -    tableName: argo_workflows
    -    userNameSecret:
    -      name: argo-postgres-config
    -      key: username
    -    passwordSecret:
    -      name: argo-postgres-config
    -      key: password
    -
    - -

    You must also create the secret with database user and password in the namespace of the workflow controller.

    -

    Example:

    -
    kubectl create secret generic argo-postgres-config -n argo --from-literal=password=mypassword --from-literal=username=argodbuser
    -
    - -

    Note that IAM-based authentication is not currently supported. However, you can start your database proxy as a sidecar -(e.g. via CloudSQL Proxy on GCP) and then specify your local -proxy address, IAM username, and an empty string as your password in the persistence configuration to connect to it.

    -

    The following tables will be created in the database when you start the workflow controller with enabled archive:

    -
      -
    • argo_workflows
    • -
    • argo_archived_workflows
    • -
    • argo_archived_workflows_labels
    • -
    • schema_history
    • -
    -

    Automatic Database Migration

    -

    Every time the Argo workflow-controller starts with persistence enabled, it tries to migrate the database to the correct version. -If the database migration fails, the workflow-controller will also fail to start. -In this case you can delete all the above tables and restart the workflow-controller.

    -

    If you know what are you doing you also have an option to skip migration:

    -
    persistence: 
    -  skipMigration: true
    -
    - -

    Required database permissions

    -

    Postgres

    -

    The database user/role must have CREATE and USAGE permissions on the public schema of the database so that the tables can be created during the migration.

    -

    Archive TTL

    -

    You can configure the time period to keep archived workflows before they will be deleted by the archived workflow garbage collection function. -The default is forever.

    -

    Example:

    -
    persistence: 
    -  archiveTTL: 10d
    -
    - -

    The ARCHIVED_WORKFLOW_GC_PERIOD variable defines the periodicity of running the garbage collection function. -The default value is documented here. -When the workflow controller starts, it sets the ticker to run every ARCHIVED_WORKFLOW_GC_PERIOD. -It does not run the garbage collection function immediately and the first garbage collection happens only after the period defined in the ARCHIVED_WORKFLOW_GC_PERIOD variable.

    -

    Cluster Name

    -

    Optionally you can set a unique name of your Kubernetes cluster. This name will populate the clustername field in the argo_archived_workflows table.

    -

    Example:

    -
    persistence: 
    -  clusterName: dev-cluster
    -
    - -

    Disabling Workflow Archive

    -

    To disable archiving of the workflows, set archive: to false in the persistence section of your configuration.

    -

    Example:

    -
    persistence: 
    -  archive: false
    -
    - - - - -

    Comments

    - - +

    Workflow Archive - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/workflow-concepts/index.html b/workflow-concepts/index.html index d76b1fa86513..b76db1ea83a9 100644 --- a/workflow-concepts/index.html +++ b/workflow-concepts/index.html @@ -1,4265 +1,68 @@ - - - - - - - - - - - - - Core Concepts - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Core Concepts - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Core Concepts

    -

    This page serves as an introduction to the core concepts of Argo.

    -

    The Workflow

    -

    The Workflow is the most important resource in Argo and serves two important functions:

    -
      -
    1. It defines the workflow to be executed.
    2. -
    3. It stores the state of the workflow.
    4. -
    -

    Because of these dual responsibilities, a Workflow should be treated as a "live" object. It is not only a static definition, but is also an "instance" of said definition. (If it isn't clear what this means, it will be explained below).

    -

    Workflow Spec

    -

    The workflow to be executed is defined in the Workflow.spec field. The core structure of a Workflow spec is a list of templates and an entrypoint.

    -

    templates can be loosely thought of as "functions": they define instructions to be executed. -The entrypoint field defines what the "main" function will be – that is, the template that will be executed first.

    -

    Here is an example of a simple Workflow spec with a single template:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: hello-world-  # Name of this Workflow
    -spec:
    -  entrypoint: whalesay        # Defines "whalesay" as the "main" template
    -  templates:
    -  - name: whalesay            # Defining the "whalesay" template
    -    container:
    -      image: docker/whalesay
    -      command: [cowsay]
    -      args: ["hello world"]   # This template runs "cowsay" in the "whalesay" image with arguments "hello world"
    -
    -

    template Types

    -

    There are 6 types of templates, divided into two different categories.

    -

    Template Definitions

    -

    These templates define work to be done, usually in a Container.

    -
    Container
    -

    Perhaps the most common template type, it will schedule a Container. The spec of the template is the same as the Kubernetes container spec, so you can define a container here the same way you do anywhere else in Kubernetes.

    -

    Example:

    -
      - name: whalesay
    -    container:
    -      image: docker/whalesay
    -      command: [cowsay]
    -      args: ["hello world"]
    -
    -
    Script
    -

    A convenience wrapper around a container. The spec is the same as a container, but adds the source: field which allows you to define a script in-place. -The script will be saved into a file and executed for you. The result of the script is automatically exported into an Argo variable either {{tasks.<NAME>.outputs.result}} or {{steps.<NAME>.outputs.result}}, depending how it was called.

    -

    Example:

    -
      - name: gen-random-int
    -    script:
    -      image: python:alpine3.6
    -      command: [python]
    -      source: |
    -        import random
    -        i = random.randint(1, 100)
    -        print(i)
    -
    -
    Resource
    -

    Performs operations on cluster Resources directly. It can be used to get, create, apply, delete, replace, or patch resources on your cluster.

    -

    This example creates a ConfigMap resource on the cluster:

    -
      - name: k8s-owner-reference
    -    resource:
    -      action: create
    -      manifest: |
    -        apiVersion: v1
    -        kind: ConfigMap
    -        metadata:
    -          generateName: owned-eg-
    -        data:
    -          some: value
    -
    -
    Suspend
    -

    A suspend template will suspend execution, either for a duration or until it is resumed manually. Suspend templates can be resumed from the CLI (with argo resume), the API endpoint, or the UI.

    -

    Example:

    -
      - name: delay
    -    suspend:
    -      duration: "20s"
    -
    -

    Template Invocators

    -

    These templates are used to invoke/call other templates and provide execution control.

    -
    Steps
    -

    A steps template allows you to define your tasks in a series of steps. The structure of the template is a "list of lists". Outer lists will run sequentially and inner lists will run in parallel. If you want to run inner lists one by one, use the Synchronization feature. You can set a wide array of options to control execution, such as when: clauses to conditionally execute a step.

    -

    In this example step1 runs first. Once it is completed, step2a and step2b will run in parallel:

    -
      - name: hello-hello-hello
    -    steps:
    -    - - name: step1
    -        template: prepare-data
    -    - - name: step2a
    -        template: run-data-first-half
    -      - name: step2b
    -        template: run-data-second-half
    -
    -
    DAG
    -

    A dag template allows you to define your tasks as a graph of dependencies. In a DAG, you list all your tasks and set which other tasks must complete before a particular task can begin. Tasks without any dependencies will be run immediately.

    -

    In this example A runs first. Once it is completed, B and C will run in parallel and once they both complete, D will run:

    -
      - name: diamond
    -    dag:
    -      tasks:
    -      - name: A
    -        template: echo
    -      - name: B
    -        dependencies: [A]
    -        template: echo
    -      - name: C
    -        dependencies: [A]
    -        template: echo
    -      - name: D
    -        dependencies: [B, C]
    -        template: echo
    -
    -

    Architecture

    -

    If you are interested in Argo's underlying architecture, see Architecture.

    - - - - -

    Comments

    - - +

    Core Concepts - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/workflow-controller-configmap/index.html b/workflow-controller-configmap/index.html index 410aef31e88d..c364e8ba4862 100644 --- a/workflow-controller-configmap/index.html +++ b/workflow-controller-configmap/index.html @@ -1,4022 +1,68 @@ - - - - - - - - - - - - - Workflow Controller Config Map - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Workflow Controller Config Map - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Workflow Controller Config Map

    -

    Introduction

    -

    The Workflow Controller Config Map is used to set controller-wide settings.

    -

    For a detailed example, please see workflow-controller-configmap.yaml.

    -

    Alternate Structure

    -

    In all versions, the configuration may be under a config: | key:

    -
    # This file describes the config settings available in the workflow controller configmap
    -apiVersion: v1
    -kind: ConfigMap
    -metadata:
    -  name: workflow-controller-configmap
    -data:
    -  config: |
    -    instanceID: my-ci-controller
    -    artifactRepository:
    -      archiveLogs: true
    -      s3:
    -        endpoint: s3.amazonaws.com
    -        bucket: my-bucket
    -        region: us-west-2
    -        insecure: false
    -        accessKeySecret:
    -          name: my-s3-credentials
    -          key: accessKey
    -        secretKeySecret:
    -          name: my-s3-credentials
    -          key: secretKey
    -
    -

    In version 2.7+, the config: | key is optional. However, if the config: | key is not used, all nested maps under top level -keys should be strings. This makes it easier to generate the map with some configuration management tools like Kustomize.

    -
    # This file describes the config settings available in the workflow controller configmap
    -apiVersion: v1
    -kind: ConfigMap
    -metadata:
    -  name: workflow-controller-configmap
    -data:                      # "config: |" key is optional in 2.7+!
    -  instanceID: my-ci-controller
    -  artifactRepository: |    # However, all nested maps must be strings
    -   archiveLogs: true
    -   s3:
    -     endpoint: s3.amazonaws.com
    -     bucket: my-bucket
    -     region: us-west-2
    -     insecure: false
    -     accessKeySecret:
    -       name: my-s3-credentials
    -       key: accessKey
    -     secretKeySecret:
    -       name: my-s3-credentials
    -       key: secretKey
    -
    - - - - -

    Comments

    - - +

    Workflow Controller Config Map - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/workflow-creator/index.html b/workflow-creator/index.html index 1e5ce04c0966..f1cd36f29f53 100644 --- a/workflow-creator/index.html +++ b/workflow-creator/index.html @@ -1,3930 +1,68 @@ - - - - - - - - - - - - - Workflow Creator - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Workflow Creator - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Workflow Creator

    -
    -

    v2.9 and after

    -
    -

    If you create your workflow via the CLI or UI, an attempt will be made to label it with the user who created it

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  name: my-wf
    -  labels:
    -    workflows.argoproj.io/creator: admin
    -    # labels must be DNS formatted, so the "@" is replaces by '.at.'  
    -    workflows.argoproj.io/creator-email: admin.at.your.org
    -    workflows.argoproj.io/creator-preferred-username: admin-preferred-username
    -
    -
    -

    Note

    -

    Labels only contain [-_.0-9a-zA-Z], so any other characters will be turned into -.

    -
    - - - - -

    Comments

    - - +

    Workflow Creator - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/workflow-events/index.html b/workflow-events/index.html index d4f34cc17b89..a2d39a5a0c50 100644 --- a/workflow-events/index.html +++ b/workflow-events/index.html @@ -1,3952 +1,68 @@ - - - - - - - - - - - - - Workflow Events - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Workflow Events - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Workflow Events

    -
    -

    v2.7.2

    -
    -

    ⚠️ Do not use Kubernetes events for automation. Events maybe lost or rolled-up.

    -

    We emit Kubernetes events on certain events.

    -

    Workflow state change:

    -
      -
    • WorkflowRunning
    • -
    • WorkflowSucceeded
    • -
    • WorkflowFailed
    • -
    • WorkflowTimedOut
    • -
    -

    Node state change:

    -
      -
    • WorkflowNodeRunning
    • -
    • WorkflowNodeSucceeded
    • -
    • WorkflowNodeFailed
    • -
    • WorkflowNodeError
    • -
    -

    The involved object is the workflow in both cases. Additionally, for node state change events, annotations indicate the name and type of the involved node:

    -
    metadata:
    -  name: my-wf.160434cb3af841f8
    -  namespace: my-ns
    -  annotations:
    -    workflows.argoproj.io/node-name: my-node
    -    workflows.argoproj.io/node-type: Pod
    -type: Normal
    -reason: WorkflowNodeSucceeded
    -message: 'Succeeded node my-node: my message'
    -involvedObject:
    -  apiVersion: v1alpha1
    -  kind: Workflow
    -  name: my-wf
    -  namespace: my-ns
    -  resourceVersion: "1234"
    -  uid: my-uid
    -firstTimestamp: "2020-04-09T16:50:16Z"
    -lastTimestamp: "2020-04-09T16:50:16Z"
    -count: 1
    -
    - - - - -

    Comments

    - - +

    Workflow Events - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/workflow-executors/index.html b/workflow-executors/index.html index 377a63894d83..e4089bb9303b 100644 --- a/workflow-executors/index.html +++ b/workflow-executors/index.html @@ -1,4207 +1,68 @@ - - - - - - - - - - - - - Workflow Executors - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Workflow Executors - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Workflow Executors

    -

    A workflow executor is a process that conforms to a specific interface that allows Argo to perform certain actions like monitoring pod logs, collecting artifacts, managing container life-cycles, etc.

    -

    The executor to be used in your workflows can be changed in the config map under the containerRuntimeExecutor key (removed in v3.4).

    -

    Emissary (emissary)

    -
    -

    v3.1 and after

    -
    -

    Default in >= v3.3.

    -

    This is the most fully featured executor.

    -
      -
    • Reliability:
        -
      • Works on GKE Autopilot
      • -
      • Does not require init process to kill sub-processes.
      • -
      -
    • -
    • More secure:
        -
      • No privileged access
      • -
      • Cannot escape the privileges of the pod's service account
      • -
      • Can runAsNonRoot.
      • -
      -
    • -
    • Scalable:
        -
      • It reads and writes to and from the container's disk and typically does not use any network APIs unless resource -type template is used.
      • -
      -
    • -
    • Artifacts:
        -
      • Output artifacts can be located on the base layer (e.g. /tmp).
      • -
      -
    • -
    • Configuration:
        -
      • command should be specified for containers.
      • -
      -
    • -
    -

    You can determine values as follows:

    -
    docker image inspect -f '{{.Config.Entrypoint}} {{.Config.Cmd}}' argoproj/argosay:v2
    -
    -

    Learn more about command and args

    -

    Image Index/Cache

    -

    If you don't provide command to run, the emissary will grab it from container image. You can also specify it using the workflow spec or emissary will look it up in the image index. This is nothing more fancy than -a configuration item.

    -

    Emissary will create a cache entry, using image with version as key and command as value, and it will reuse it for specific image/version.

    -

    Exit Code 64

    -

    The emissary will exit with code 64 if it fails. This may indicate a bug in the emissary.

    -

    Docker (docker)

    -

    ⚠️Deprecated. Removed in v3.4.

    -

    Default in <= v3.2.

    -
      -
    • Least secure:
        -
      • It requires privileged access to docker.sock of the host to be mounted which. Often rejected by Open Policy Agent (OPA) or your Pod Security Policy (PSP).
      • -
      • It can escape the privileges of the pod's service account
      • -
      • It cannot runAsNonRoot.
      • -
      -
    • -
    • Equal most scalable:
        -
      • It communicates directly with the local Docker daemon.
      • -
      -
    • -
    • Artifacts:
        -
      • Output artifacts can be located on the base layer (e.g. /tmp).
      • -
      -
    • -
    • Configuration:
        -
      • No additional configuration needed.
      • -
      -
    • -
    -

    Note: when using docker as workflow executors, messages printed in both stdout and stderr are captured in the Argo variable .outputs.result.

    -

    Kubelet (kubelet)

    -

    ⚠️Deprecated. Removed in v3.4.

    -
      -
    • Secure
        -
      • No privileged access
      • -
      • Cannot escape the privileges of the pod's service account
      • -
      • runAsNonRoot - TBD, see #4186
      • -
      -
    • -
    • Scalable:
        -
      • Operations performed against the local Kubelet
      • -
      -
    • -
    • Artifacts:
        -
      • Output artifacts must be saved on volumes (e.g. empty-dir) and not the base image layer (e.g. /tmp)
      • -
      -
    • -
    • Step/Task result:
        -
      • Warnings that normally goes to stderr will get captured in a step or a dag task's outputs.result. May require changes if your pipeline is conditioned on steps/tasks.name.outputs.result
      • -
      -
    • -
    • Configuration:
        -
      • Additional Kubelet configuration maybe needed
      • -
      -
    • -
    -

    Kubernetes API (k8sapi)

    -

    ⚠️Deprecated. Removed in v3.4.

    -
      -
    • Reliability:
        -
      • Works on GKE Autopilot
      • -
      -
    • -
    • Most secure:
        -
      • No privileged access
      • -
      • Cannot escape the privileges of the pod's service account
      • -
      • Can runAsNonRoot
      • -
      -
    • -
    • Least scalable:
        -
      • Log retrieval and container operations performed against the remote Kubernetes API
      • -
      -
    • -
    • Artifacts:
        -
      • Output artifacts must be saved on volumes (e.g. empty-dir) and not the base image layer (e.g. /tmp)
      • -
      -
    • -
    • Step/Task result:
        -
      • Warnings that normally goes to stderr will get captured in a step or a dag task's outputs.result. May require changes if your pipeline is conditioned on steps/tasks.name.outputs.result
      • -
      -
    • -
    • Configuration:
        -
      • No additional configuration needed.
      • -
      -
    • -
    -

    Process Namespace Sharing (pns)

    -

    ⚠️Deprecated. Removed in v3.4.

    -
      -
    • More secure:
        -
      • No privileged access
      • -
      • cannot escape the privileges of the pod's service account
      • -
      • Can runAsNonRoot, if you use volumes (e.g. empty-dir) for your output artifacts
      • -
      • Processes are visible to other containers in the pod. This includes all information visible in /proc, such as passwords that were passed as arguments or environment variables. These are protected only by regular Unix permissions.
      • -
      -
    • -
    • Scalable:
        -
      • Most operations use local procfs.
      • -
      • Log retrieval uses the remote Kubernetes API
      • -
      -
    • -
    • Artifacts:
        -
      • Output artifacts can be located on the base layer (e.g. /tmp)
      • -
      • Cannot capture artifacts from a base layer which has a volume mounted under it
      • -
      • Cannot capture artifacts from base layer if the container is short-lived.
      • -
      -
    • -
    • Configuration:
        -
      • No additional configuration needed.
      • -
      -
    • -
    • Process will no longer run with PID 1
    • -
    • Doesn't work for Windows containers.
    • -
    -

    Learn more

    - - - - -

    Comments

    - - +

    Workflow Executors - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/workflow-inputs/index.html b/workflow-inputs/index.html index 3b9cca6a1924..df40fe056f3a 100644 --- a/workflow-inputs/index.html +++ b/workflow-inputs/index.html @@ -1,4088 +1,68 @@ - - - - - - - - - - - - - Workflow Inputs - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Workflow Inputs - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Workflow Inputs

    -

    Introduction

    -

    Workflows and templates operate on a set of defined parameters and arguments that are supplied to the running container. The precise details of how to manage the inputs can be confusing; this article attempts to clarify concepts and provide simple working examples to illustrate the various configuration options.

    -

    The examples below are limited to DAGTemplates and mainly focused on parameters, but similar reasoning applies to the other types of templates.

    -

    Parameter Inputs

    -

    First, some clarification of terms is needed. For a glossary reference, see Argo Core Concepts.

    -

    A workflow provides arguments, which are passed in to the entry point template. A template defines inputs which are then provided by template callers (such as steps, dag, or even a workflow). The structure of both is identical.

    -

    For example, in a Workflow, one parameter would look like this:

    -
    arguments:
    -  parameters:
    -  - name: workflow-param-1
    -
    -

    And in a template:

    -
    inputs:
    -  parameters:
    -  - name: template-param-1
    -
    -

    Inputs to DAGTemplates use the arguments format:

    -
    dag:
    -  tasks:
    -  - name: step-A
    -    template: step-template-a
    -    arguments:
    -      parameters:
    -      - name: template-param-1
    -        value: abcd
    -
    -

    Previous examples in context:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: example-
    -spec:
    -  entrypoint: main
    -  arguments:
    -    parameters:
    -    - name: workflow-param-1
    -  templates:
    -  - name: main
    -    dag:
    -      tasks:
    -      - name: step-A 
    -        template: step-template-a
    -        arguments:
    -          parameters:
    -          - name: template-param-1
    -            value: "{{workflow.parameters.workflow-param-1}}"
    -
    -  - name: step-template-a
    -    inputs:
    -      parameters:
    -        - name: template-param-1
    -    script:
    -      image: alpine
    -      command: [/bin/sh]
    -      source: |
    -          echo "{{inputs.parameters.template-param-1}}"
    -
    -

    To run this example: argo submit -n argo example.yaml -p 'workflow-param-1="abcd"' --watch

    -

    Using Previous Step Outputs As Inputs

    -

    In DAGTemplates, it is common to want to take the output of one step and send it as the input to another step. However, there is a difference in how this works for artifacts vs parameters. Suppose our step-template-a defines some outputs:

    -
    outputs:
    -  parameters:
    -    - name: output-param-1
    -      valueFrom:
    -        path: /p1.txt
    -  artifacts:
    -    - name: output-artifact-1
    -      path: /some-directory
    -
    -

    In my DAGTemplate, I can send these outputs to another template like this:

    -
    dag:
    -  tasks:
    -  - name: step-A 
    -    template: step-template-a
    -    arguments:
    -      parameters:
    -      - name: template-param-1
    -        value: "{{workflow.parameters.workflow-param-1}}"
    -  - name: step-B
    -    dependencies: [step-A]
    -    template: step-template-b
    -    arguments:
    -      parameters:
    -      - name: template-param-2
    -        value: "{{tasks.step-A.outputs.parameters.output-param-1}}"
    -      artifacts:
    -      - name: input-artifact-1
    -        from: "{{tasks.step-A.outputs.artifacts.output-artifact-1}}"
    -
    -

    Note the important distinction between parameters and artifacts; they both share the name field, but one uses value and the other uses from.

    - - - - -

    Comments

    - - +

    Workflow Inputs - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/workflow-notifications/index.html b/workflow-notifications/index.html index 3bb7841633d8..36ea7b96064a 100644 --- a/workflow-notifications/index.html +++ b/workflow-notifications/index.html @@ -1,3924 +1,68 @@ - - - - - - - - - - - - - Workflow Notifications - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Workflow Notifications - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Workflow Notifications

    -

    There are a number of use cases where you may wish to notify an external system when a workflow completes:

    -
      -
    1. Send an email.
    2. -
    3. Send a Slack (or other instant message).
    4. -
    5. Send a message to Kafka (or other message bus).
    6. -
    -

    You have options:

    -
      -
    1. For individual workflows, can add an exit handler to your workflow, such as in this example.
    2. -
    3. If you want the same for every workflow, you can add an exit handler to the default workflow spec.
    4. -
    5. Use a service (e.g. Heptio Labs EventRouter) to the Workflow events we emit.
    6. -
    - - - - -

    Comments

    - - +

    Workflow Notifications - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/workflow-of-workflows/index.html b/workflow-of-workflows/index.html index b825be22e652..c27e66d26a5c 100644 --- a/workflow-of-workflows/index.html +++ b/workflow-of-workflows/index.html @@ -1,4071 +1,68 @@ - - - - - - - - - - - - - Workflow of Workflows - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Workflow of Workflows - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Workflow of Workflows

    -
    -

    v2.9 and after

    -
    -

    Introduction

    -

    The Workflow of Workflows pattern involves a parent workflow triggering one or more child workflows, managing them, and acting on their results.

    -

    Examples

    -

    You can use workflowTemplateRef to trigger a workflow inline.

    -
      -
    1. Define your workflow as a workflowtemplate.
    2. -
    -
    apiVersion: argoproj.io/v1alpha1
    -kind: WorkflowTemplate
    -metadata:
    -  name: workflow-template-submittable
    -spec:
    -  entrypoint: whalesay-template
    -  arguments:
    -    parameters:
    -      - name: message
    -        value: hello world
    -  templates:
    -    - name: whalesay-template
    -      inputs:
    -        parameters:
    -          - name: message
    -      container:
    -        image: docker/whalesay
    -        command: [cowsay]
    -        args: ["{{inputs.parameters.message}}"]
    -
    -
      -
    1. Create the Workflowtemplate in cluster using argo template create <yaml>
    2. -
    3. Define the workflow of workflows.
    4. -
    -
    # This template demonstrates a workflow of workflows.
    -# Workflow triggers one or more workflows and manages them.
    -apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: workflow-of-workflows-
    -spec:
    -  entrypoint: main
    -  templates:
    -    - name: main
    -      steps:
    -        - - name: workflow1
    -            template: resource-without-argument
    -            arguments:
    -              parameters:
    -              - name: workflowtemplate
    -                value: "workflow-template-submittable"
    -        - - name: workflow2
    -            template: resource-with-argument
    -            arguments:
    -              parameters:
    -              - name: workflowtemplate
    -                value: "workflow-template-submittable"
    -              - name: message
    -                value: "Welcome Argo"
    -
    -    - name: resource-without-argument
    -      inputs:
    -        parameters:
    -          - name: workflowtemplate
    -      resource:
    -        action: create
    -        manifest: |
    -          apiVersion: argoproj.io/v1alpha1
    -          kind: Workflow
    -          metadata:
    -            generateName: workflow-of-workflows-1-
    -          spec:
    -            workflowTemplateRef:
    -              name: {{inputs.parameters.workflowtemplate}}
    -        successCondition: status.phase == Succeeded
    -        failureCondition: status.phase in (Failed, Error)
    -
    -    - name: resource-with-argument
    -      inputs:
    -        parameters:
    -          - name: workflowtemplate
    -          - name: message
    -      resource:
    -        action: create
    -        manifest: |
    -          apiVersion: argoproj.io/v1alpha1
    -          kind: Workflow
    -          metadata:
    -            generateName: workflow-of-workflows-2-
    -          spec:
    -            arguments:
    -              parameters:
    -              - name: message
    -                value: {{inputs.parameters.message}}
    -            workflowTemplateRef:
    -              name: {{inputs.parameters.workflowtemplate}}
    -        successCondition: status.phase == Succeeded
    -        failureCondition: status.phase in (Failed, Error)
    -
    - - - - -

    Comments

    - - +

    Workflow of Workflows - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/workflow-pod-security-context/index.html b/workflow-pod-security-context/index.html index bacc94494e65..5c20d1f32e8f 100644 --- a/workflow-pod-security-context/index.html +++ b/workflow-pod-security-context/index.html @@ -1,3933 +1,68 @@ - - - - - - - - - - - - - Workflow Pod Security Context - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Workflow Pod Security Context - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Workflow Pod Security Context

    -

    By default, all workflow pods run as root. The Docker executor even requires privileged: true.

    -

    For other workflow executors, you can run your workflow pods more securely by configuring the security context for your workflow pod.

    -

    This is likely to be necessary if you have a pod security policy. You probably can't use the Docker executor if you have a pod security policy.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: security-context-
    -spec:
    -  securityContext:
    -    runAsNonRoot: true
    -    runAsUser: 8737 #; any non-root user
    -
    -

    You can configure this globally using workflow defaults.

    -
    -

    It is easy to make a workflow need root unintentionally

    -

    You may find that user's workflows have been written to require root with seemingly innocuous code. E.g. mkdir /my-dir would require root.

    -
    -
    -

    You must use volumes for output artifacts

    -

    If you use runAsNonRoot - you cannot have output artifacts on base layer (e.g. /tmp). You must use a volume (e.g. empty dir).

    -
    - - - - -

    Comments

    - - +

    Workflow Pod Security Context - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/workflow-rbac/index.html b/workflow-rbac/index.html index 7dec1fef23fd..c9227b00267f 100644 --- a/workflow-rbac/index.html +++ b/workflow-rbac/index.html @@ -1,3950 +1,68 @@ - - - - - - - - - - - - - Workflow RBAC - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Workflow RBAC - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Workflow RBAC

    -

    All pods in a workflow run with the service account specified in workflow.spec.serviceAccountName, or if omitted, -the default service account of the workflow's namespace. The amount of access which a workflow needs is dependent on -what the workflow needs to do. For example, if your workflow needs to deploy a resource, then the workflow's service -account will require 'create' privileges on that resource.

    -

    Warning: We do not recommend using the default service account in production. It is a shared account so may have -permissions added to it you do not want. Instead, create a service account only for your workflow.

    -

    The minimum for the executor to function:

    -

    For >= v3.4:

    -
    apiVersion: rbac.authorization.k8s.io/v1
    -kind: Role
    -metadata:
    -  name: executor
    -rules:
    -  - apiGroups:
    -      - argoproj.io
    -    resources:
    -      - workflowtaskresults
    -    verbs:
    -      - create
    -      - patch
    -
    -

    For <= v3.3 use.

    -
    apiVersion: rbac.authorization.k8s.io/v1
    -kind: Role
    -metadata:
    -  name: executor
    -rules:
    -  - apiGroups:
    -      - ""
    -    resources:
    -      - pods
    -    verbs:
    -      - get
    -      - patch
    -
    -

    Warning: For many organizations, it may not be acceptable to give a workflow the pod patch permission, see #3961

    -

    If you are not using the emissary, you'll need additional permissions. -See executor for suitable permissions.

    - - - - -

    Comments

    - - +

    Workflow RBAC - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/workflow-restrictions/index.html b/workflow-restrictions/index.html index 3a51d5093aa2..04389307d30d 100644 --- a/workflow-restrictions/index.html +++ b/workflow-restrictions/index.html @@ -1,4009 +1,68 @@ - - - - - - - - - - - - - Workflow Restrictions - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Workflow Restrictions - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    Workflow Restrictions

    -
    -

    v2.9 and after

    -
    -

    Introduction

    -

    As the administrator of the controller, you may want to limit which types of Workflows your users can run. -Workflow Restrictions allow you to set requirements for all Workflows.

    -

    Available Restrictions

    -
      -
    • templateReferencing: Strict: Only process Workflows using workflowTemplateRef. You can use this to require usage of WorkflowTemplates, disallowing arbitrary Workflow execution.
    • -
    • templateReferencing: Secure: Same as Strict plus enforce that a referenced WorkflowTemplate hasn't changed between operations. If a running Workflow's underlying WorkflowTemplate changes, the Workflow will error out.
    • -
    -

    Setting Workflow Restrictions

    -

    You can add workflowRestrictions in the workflow-controller-configmap.

    -

    For example, to specify that Workflows may only run with workflowTemplateRef:

    -
    # This file describes the config settings available in the workflow controller configmap
    -apiVersion: v1
    -kind: ConfigMap
    -metadata:
    -  name: workflow-controller-configmap
    -data:
    -  workflowRestrictions: |
    -    templateReferencing: Strict
    -
    - - - - -

    Comments

    - - +

    Workflow Restrictions - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/workflow-submitting-workflow/index.html b/workflow-submitting-workflow/index.html index 59c9d31403f6..43e92e65a6b2 100644 --- a/workflow-submitting-workflow/index.html +++ b/workflow-submitting-workflow/index.html @@ -1,3938 +1,68 @@ - - - - - - - - - - - - - One Workflow Submitting Another - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + One Workflow Submitting Another - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - -
    -
    -
    - - - -
    -
    -
    - - +
    +
    +
    +
    - - - - - - - - -

    One Workflow Submitting Another

    -
    -

    v2.8 and after

    -
    -

    If you want one workflow to create another, you can do this using curl. You'll need an access token. Typically the best way is to submit from a workflow template:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: demo-
    -spec:
    -  entrypoint: main
    -  templates:
    -    - name: main
    -      steps:
    -        - - name: a
    -            template: create-wf
    -    - name: create-wf
    -      script:
    -        image: curlimages/curl:latest
    -        command:
    -          - sh
    -        source: >
    -          curl https://argo-server:2746/api/v1/workflows/argo/submit \
    -            -fs \
    -            -H "Authorization: Bearer eyJhbGci..." \
    -            -d '{"resourceKind": "WorkflowTemplate", "resourceName": "wait", "submitOptions": {"labels": "workflows.argoproj.io/workflow-template=wait"}}'
    -
    - - - - -

    Comments

    - - +

    One Workflow Submitting Another - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file diff --git a/workflow-templates/index.html b/workflow-templates/index.html index 4fc997b58e6a..6c24676a2437 100644 --- a/workflow-templates/index.html +++ b/workflow-templates/index.html @@ -1,4491 +1,68 @@ - - - - - - - - - - - - - Workflow Templates - Argo Workflows - The workflow engine for Kubernetes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + Workflow Templates - Argo Workflows - The workflow engine for Kubernetes + + + + + + + - - - - - - - - - - - - - - - - - - - -
    - -
    - - - - - - -
    - - - - - +
    - - - - - - - -
    - - - -
    - - - -
    -
    -
    - - - - - - -
    -
    -
    - - - - - - +
    +
    +
    +
    - - - - - - - - -

    Workflow Templates

    -
    -

    v2.4 and after

    -
    -

    Introduction

    -

    WorkflowTemplates are definitions of Workflows that live in your cluster. This allows you to create a library of -frequently-used templates and reuse them either by submitting them directly (v2.7 and after) or by referencing them from -your Workflows.

    -

    WorkflowTemplate vs template

    -

    The terms WorkflowTemplate and template have created an unfortunate naming collision and have created some confusion -in the past. However, a quick description should clarify each and their differences.

    -
      -
    • A template (lower-case) is a task within a Workflow or (confusingly) a WorkflowTemplate under the field templates. Whenever you define a -Workflow, you must define at least one (but usually more than one) template to run. This template can be of type -container, script, dag, steps, resource, or suspend and can be referenced by an entrypoint or by other -dag, and step templates.
    • -
    -

    Here is an example of a Workflow with two templates:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: steps-
    -spec:
    -  entrypoint: hello           # We reference our first "template" here
    -
    -  templates:
    -  - name: hello               # The first "template" in this Workflow, it is referenced by "entrypoint"
    -    steps:                    # The type of this "template" is "steps"
    -    - - name: hello
    -        template: whalesay    # We reference our second "template" here
    -        arguments:
    -          parameters: [{name: message, value: "hello1"}]
    -
    -  - name: whalesay             # The second "template" in this Workflow, it is referenced by "hello"
    -    inputs:
    -      parameters:
    -      - name: message
    -    container:                # The type of this "template" is "container"
    -      image: docker/whalesay
    -      command: [cowsay]
    -      args: ["{{inputs.parameters.message}}"]
    -
    -
      -
    • A WorkflowTemplate is a definition of a Workflow that lives in your cluster. Since it is a definition of a Workflow -it also contains templates. These templates can be referenced from within the WorkflowTemplate and from other Workflows -and WorkflowTemplates on your cluster. To see how, please see Referencing Other WorkflowTemplates.
    • -
    -

    WorkflowTemplate Spec

    -
    -

    v2.7 and after

    -
    -

    In v2.7 and after, all the fields in WorkflowSpec (except for priority that must be configured in a WorkflowSpec itself) are supported for WorkflowTemplates. You can take any existing Workflow you may have and convert it to a WorkflowTemplate by substituting kind: Workflow to kind: WorkflowTemplate.

    -
    -

    v2.4 – 2.6

    -
    -

    WorkflowTemplates in v2.4 - v2.6 are only partial Workflow definitions and only support the templates and -arguments field.

    -

    This would not be a valid WorkflowTemplate in v2.4 - v2.6 (notice entrypoint field):

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: WorkflowTemplate
    -metadata:
    -  name: workflow-template-submittable
    -spec:
    -  entrypoint: whalesay-template     # Fields other than "arguments" and "templates" not supported in v2.4 - v2.6
    -  arguments:
    -    parameters:
    -      - name: message
    -        value: hello world
    -  templates:
    -    - name: whalesay-template
    -      inputs:
    -        parameters:
    -          - name: message
    -      container:
    -        image: docker/whalesay
    -        command: [cowsay]
    -        args: ["{{inputs.parameters.message}}"]
    -
    -

    However, this would be a valid WorkflowTemplate:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: WorkflowTemplate
    -metadata:
    -  name: workflow-template-submittable
    -spec:
    -  arguments:
    -    parameters:
    -      - name: message
    -        value: hello world
    -  templates:
    -    - name: whalesay-template
    -      inputs:
    -        parameters:
    -          - name: message
    -      container:
    -        image: docker/whalesay
    -        command: [cowsay]
    -        args: ["{{inputs.parameters.message}}"]
    -
    -

    Adding labels/annotations to Workflows with workflowMetadata

    -
    -

    2.10.2 and after

    -
    -

    To automatically add labels and/or annotations to Workflows created from WorkflowTemplates, use workflowMetadata.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: WorkflowTemplate
    -metadata:
    -  name: workflow-template-submittable
    -spec:
    -  workflowMetadata:
    -    labels:
    -      example-label: example-value
    -
    -

    Working with parameters

    -

    When working with parameters in a WorkflowTemplate, please note the following:

    -
      -
    • When working with global parameters, you can instantiate your global variables in your Workflow -and then directly reference them in your WorkflowTemplate. Below is a working example:
    • -
    -
    apiVersion: argoproj.io/v1alpha1
    -kind: WorkflowTemplate
    -metadata:
    -  name: hello-world-template-global-arg
    -spec:
    -  serviceAccountName: argo
    -  templates:
    -    - name: hello-world
    -      container:
    -        image: docker/whalesay
    -        command: [cowsay]
    -        args: ["{{workflow.parameters.global-parameter}}"]
    ----
    -apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: hello-world-wf-global-arg-
    -spec:
    -  serviceAccountName: argo
    -  entrypoint: whalesay
    -  arguments:
    -    parameters:
    -      - name: global-parameter
    -        value: hello
    -  templates:
    -    - name: whalesay
    -      steps:
    -        - - name: hello-world
    -            templateRef:
    -              name: hello-world-template-global-arg
    -              template: hello-world
    -
    -
      -
    • When working with local parameters, the values of local parameters must be supplied at the template definition inside -the WorkflowTemplate. Below is a working example:
    • -
    -
    apiVersion: argoproj.io/v1alpha1
    -kind: WorkflowTemplate
    -metadata:
    -  name: hello-world-template-local-arg
    -spec:
    -  templates:
    -    - name: hello-world
    -      inputs:
    -        parameters:
    -          - name: msg
    -            value: "hello world"
    -      container:
    -        image: docker/whalesay
    -        command: [cowsay]
    -        args: ["{{inputs.parameters.msg}}"]
    ----
    -apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: hello-world-local-arg-
    -spec:
    -  entrypoint: whalesay
    -  templates:
    -    - name: whalesay
    -      steps:
    -        - - name: hello-world
    -            templateRef:
    -              name: hello-world-template-local-arg
    -              template: hello-world
    -
    -

    Referencing other WorkflowTemplates

    -

    You can reference templates from another WorkflowTemplates (see the difference between the two) using a templateRef field. -Just as how you reference other templates within the same Workflow, you should do so from a steps or dag template.

    -

    Here is an example from a steps template:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: workflow-template-hello-world-
    -spec:
    -  entrypoint: whalesay
    -  templates:
    -  - name: whalesay
    -    steps:                              # You should only reference external "templates" in a "steps" or "dag" "template".
    -      - - name: call-whalesay-template
    -          templateRef:                  # You can reference a "template" from another "WorkflowTemplate" using this field
    -            name: workflow-template-1   # This is the name of the "WorkflowTemplate" CRD that contains the "template" you want
    -            template: whalesay-template # This is the name of the "template" you want to reference
    -          arguments:                    # You can pass in arguments as normal
    -            parameters:
    -            - name: message
    -              value: "hello world"
    -
    -

    You can also do so similarly with a dag template:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: workflow-template-hello-world-
    -spec:
    -  entrypoint: whalesay
    -  templates:
    -  - name: whalesay
    -    dag:
    -      tasks:
    -        - name: call-whalesay-template
    -          templateRef:
    -            name: workflow-template-1
    -            template: whalesay-template
    -          arguments:
    -            parameters:
    -            - name: message
    -              value: "hello world"
    -
    -

    You should never reference another template directly on a template object (outside of a steps or dag template). -This includes both using template and templateRef. -This behavior is deprecated, no longer supported, and will be removed in a future version.

    -

    Here is an example of a deprecated reference that should not be used:

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: workflow-template-hello-world-
    -spec:
    -  entrypoint: whalesay
    -  templates:
    -  - name: whalesay
    -    template:                     # You should NEVER use "template" here. Use it under a "steps" or "dag" template (see above).
    -    templateRef:                  # You should NEVER use "templateRef" here. Use it under a "steps" or "dag" template (see above).
    -      name: workflow-template-1
    -      template: whalesay-template
    -    arguments:                    # Arguments here are ignored. Use them under a "steps" or "dag" template (see above).
    -      parameters:
    -      - name: message
    -        value: "hello world"
    -
    -

    The reasoning for deprecating this behavior is that a template is a "definition": it defines inputs and things to be -done once instantiated. With this deprecated behavior, the same template object is allowed to be an "instantiator": -to pass in "live" arguments and reference other templates (those other templates may be "definitions" or "instantiators").

    -

    This behavior has been problematic and dangerous. It causes confusion and has design inconsistencies.

    -
    -

    2.9 and after

    -
    -

    Create Workflow from WorkflowTemplate Spec

    -

    You can create Workflow from WorkflowTemplate spec using workflowTemplateRef. If you pass the arguments to created Workflow, it will be merged with workflow template arguments. -Here is an example for referring WorkflowTemplate as Workflow with passing entrypoint and Workflow Arguments to WorkflowTemplate

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: workflow-template-hello-world-
    -spec:
    -  entrypoint: whalesay-template
    -  arguments:
    -    parameters:
    -      - name: message
    -        value: "from workflow"
    -  workflowTemplateRef:
    -    name: workflow-template-submittable
    -
    -

    Here is an example of a referring WorkflowTemplate as Workflow and using WorkflowTemplates's entrypoint and Workflow Arguments

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: Workflow
    -metadata:
    -  generateName: workflow-template-hello-world-
    -spec:
    -  workflowTemplateRef:
    -    name: workflow-template-submittable
    -
    -

    Managing WorkflowTemplates

    -

    CLI

    -

    You can create some example templates as follows:

    -
    argo template create https://raw.githubusercontent.com/argoproj/argo-workflows/main/examples/workflow-template/templates.yaml
    -
    -

    Then submit a workflow using one of those templates:

    -
    argo submit https://raw.githubusercontent.com/argoproj/argo-workflows/main/examples/workflow-template/hello-world.yaml
    -
    -
    -

    2.7 and after

    -
    -

    Then submit a WorkflowTemplate as a Workflow:

    -
    argo submit --from workflowtemplate/workflow-template-submittable
    -
    -

    If you need to submit a WorkflowTemplate as a Workflow with parameters:

    -
    argo submit --from workflowtemplate/workflow-template-submittable -p message=value1
    -
    -

    kubectl

    -

    Using kubectl apply -f and kubectl get wftmpl

    -

    GitOps via Argo CD

    -

    WorkflowTemplate resources can be managed with GitOps by using Argo CD

    -

    UI

    -

    WorkflowTemplate resources can also be managed by the UI

    -

    Users can specify options under enum to enable drop-down list selection when submitting WorkflowTemplates from the UI.

    -
    apiVersion: argoproj.io/v1alpha1
    -kind: WorkflowTemplate
    -metadata:
    -  name: workflow-template-with-enum-values
    -spec:
    -  entrypoint: argosay
    -  arguments:
    -    parameters:
    -      - name: message
    -        value: one
    -        enum:
    -          -   one
    -          -   two
    -          -   three
    -  templates:
    -    - name: argosay
    -      inputs:
    -        parameters:
    -          - name: message
    -            value: '{{workflow.parameters.message}}'
    -      container:
    -        name: main
    -        image: 'argoproj/argosay:v2'
    -        command:
    -          - /argosay
    -        args:
    -          - echo
    -          - '{{inputs.parameters.message}}'
    -
    - - - - -

    Comments

    - - +

    Workflow Templates - Argo Workflows - The workflow engine for Kubernetes

    +

    This page has moved to https://argo-workflows.readthedocs.io/en/latest/{{path_subpath}}.

    +

    You should be redirected there automatically. Please click the link above if you are not redirected.

    - - - - Back to top - -
    - - - -
    -
    -
    +
    + - - - - - - \ No newline at end of file