Set default environment name to the cluster name #2164

Merged: 3 commits merged on May 11, 2021 (showing changes from 2 commits)

8 changes: 4 additions & 4 deletions .circleci/config.yml
@@ -56,14 +56,14 @@ commands:
steps:
- run:
name: Create Cluster
command: cortex cluster up << parameters.config >> --configure-env aws -y
command: cortex cluster up << parameters.config >> --configure-env cortex -y
- run:
name: Run E2E Tests
no_output_timeout: 30m
command: |
pytest -v test/e2e/tests --env aws --skip-autoscaling --skip-load --skip-long-running
pytest -v test/e2e/tests --env aws -k test_autoscaling
pytest -v test/e2e/tests --env aws -k test_load
pytest -v test/e2e/tests --env cortex --skip-autoscaling --skip-load --skip-long-running
pytest -v test/e2e/tests --env cortex -k test_autoscaling
pytest -v test/e2e/tests --env cortex -k test_load
- run:
name: Delete Cluster
command: cortex cluster down --config << parameters.config >> -y
39 changes: 22 additions & 17 deletions cli/cmd/cluster.go
@@ -74,7 +74,7 @@ var _eksctlPrefixRegex = regexp.MustCompile(`^.*[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]

func clusterInit() {
_clusterUpCmd.Flags().SortFlags = false
_clusterUpCmd.Flags().StringVarP(&_flagClusterUpEnv, "configure-env", "e", "aws", "name of environment to configure")
_clusterUpCmd.Flags().StringVarP(&_flagClusterUpEnv, "configure-env", "e", "", "name of environment to configure (default: the name of your cluster)")
_clusterUpCmd.Flags().BoolVarP(&_flagClusterDisallowPrompt, "yes", "y", false, "skip prompts")
_clusterCmd.AddCommand(_clusterUpCmd)

@@ -144,26 +144,31 @@ var _clusterUpCmd = &cobra.Command{

clusterConfigFile := args[0]

- envExists, err := isEnvConfigured(_flagClusterUpEnv)
- if err != nil {
+ if _, err := docker.GetDockerClient(); err != nil {
exit.Error(err)
}
- if envExists {
- if _flagClusterDisallowPrompt {
- fmt.Printf("found an existing environment named \"%s\", which will be overwritten to connect to this cluster once it's created\n\n", _flagClusterUpEnv)
- } else {
- prompt.YesOrExit(fmt.Sprintf("found an existing environment named \"%s\"; would you like to overwrite it to connect to this cluster once it's created?", _flagClusterUpEnv), "", "you can specify a different environment name to be configured to connect to this cluster by specifying the --configure-env flag (e.g. `cortex cluster up --configure-env prod`); or you can list your environments with `cortex env list` and delete an environment with `cortex env delete ENV_NAME`")
- }
- }

- if _, err := docker.GetDockerClient(); err != nil {
+ accessConfig, err := getNewClusterAccessConfig(clusterConfigFile)
+ if err != nil {
exit.Error(err)
}

- accessConfig, err := getNewClusterAccessConfig(clusterConfigFile)
+ envName := _flagClusterUpEnv
+ if envName == "" {
+ envName = accessConfig.ClusterName
+ }
+
+ envExists, err := isEnvConfigured(envName)
if err != nil {
exit.Error(err)
}
+ if envExists {
+ if _flagClusterDisallowPrompt {
+ fmt.Printf("found an existing environment named \"%s\", which will be overwritten to connect to this cluster once it's created\n\n", envName)
+ } else {
+ prompt.YesOrExit(fmt.Sprintf("found an existing environment named \"%s\"; would you like to overwrite it to connect to this cluster once it's created?", envName), "", "you can specify a different environment name to be configured to connect to this cluster by specifying the --configure-env flag (e.g. `cortex cluster up --configure-env prod`); or you can list your environments with `cortex env list` and delete an environment with `cortex env delete ENV_NAME`")
+ }
+ }

awsClient, err := newAWSClient(accessConfig.Region, true)
if err != nil {
@@ -290,23 +295,23 @@

loadBalancer, err := getLoadBalancer(clusterConfig.ClusterName, OperatorLoadBalancer, awsClient)
if err != nil {
exit.Error(errors.Append(err, fmt.Sprintf("\n\nyou can attempt to resolve this issue and configure your cli environment by running `cortex cluster info --configure-env %s`", _flagClusterUpEnv)))
exit.Error(errors.Append(err, fmt.Sprintf("\n\nyou can attempt to resolve this issue and configure your cli environment by running `cortex cluster info --configure-env %s`", envName)))
}

newEnvironment := cliconfig.Environment{
Name: _flagClusterUpEnv,
Name: envName,
OperatorEndpoint: "https://" + *loadBalancer.DNSName,
}

err = addEnvToCLIConfig(newEnvironment, true)
if err != nil {
exit.Error(errors.Append(err, fmt.Sprintf("\n\nyou can attempt to resolve this issue and configure your cli environment by running `cortex cluster info --configure-env %s`", _flagClusterUpEnv)))
exit.Error(errors.Append(err, fmt.Sprintf("\n\nyou can attempt to resolve this issue and configure your cli environment by running `cortex cluster info --configure-env %s`", envName)))
}

if envExists {
fmt.Printf(console.Bold("\nthe environment named \"%s\" has been updated to point to this cluster (and was set as the default environment)\n"), _flagClusterUpEnv)
fmt.Printf(console.Bold("\nthe environment named \"%s\" has been updated to point to this cluster (and was set as the default environment)\n"), envName)
} else {
fmt.Printf(console.Bold("\nan environment named \"%s\" has been configured to point to this cluster (and was set as the default environment)\n"), _flagClusterUpEnv)
fmt.Printf(console.Bold("\nan environment named \"%s\" has been configured to point to this cluster (and was set as the default environment)\n"), envName)
}
},
}
1 change: 0 additions & 1 deletion cli/cmd/errors.go
@@ -50,7 +50,6 @@ const (
ErrCortexYAMLNotFound = "cli.cortex_yaml_not_found"
ErrDockerCtrlC = "cli.docker_ctrl_c"
ErrResponseUnknown = "cli.response_unknown"
ErrOnlyAWSClusterFlagSet = "cli.only_aws_cluster_flag_set"
ErrMissingAWSCredentials = "cli.missing_aws_credentials"
ErrCredentialsInClusterConfig = "cli.credentials_in_cluster_config"
ErrClusterUp = "cli.cluster_up"
2 changes: 1 addition & 1 deletion docs/clients/cli.md
@@ -113,7 +113,7 @@ Usage:
cortex cluster up CLUSTER_CONFIG_FILE [flags]

Flags:
-e, --configure-env string name of environment to configure (default "aws")
-e, --configure-env string name of environment to configure (default: the name of your cluster)
-y, --yes skip prompts
-h, --help help for up
```
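
A quick sketch of the two ways to use the flag (the config filename `cluster.yaml` and the environment name `prod` are illustrative, not part of the CLI reference):

```bash
# with no --configure-env, the configured environment is named after the cluster
cortex cluster up cluster.yaml -y

# or choose an explicit environment name
cortex cluster up cluster.yaml --configure-env prod -y
```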
2 changes: 1 addition & 1 deletion docs/clusters/management/environments.md
@@ -1,6 +1,6 @@
# Environments

When you create a cluster with `cortex cluster up`, an environment named `aws` is automatically created to point to your cluster and is configured to be the default environment. You can name the environment something else via the `--configure-env` flag, e.g. `cortex cluster up --configure-env prod`. You can also use the `--configure-env` flag with `cortex cluster info` to create / update the specified environment.
When you create a cluster with `cortex cluster up`, an environment with the same name as your cluster is automatically created to point to your cluster and is configured to be the default environment. You can name the environment something else via the `--configure-env` flag, e.g. `cortex cluster up --configure-env prod`. You can also use the `--configure-env` flag with `cortex cluster info` to create / update the specified environment.

You can list your environments with `cortex env list`, change the default environment with `cortex env default`, delete an environment with `cortex env delete`, and create/update an environment with `cortex env configure`.
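
As a concrete sketch of the new default, assuming a cluster config whose `cluster_name` is `my-cluster` (both names here are illustrative):

```bash
# cluster.yaml sets: cluster_name: my-cluster
cortex cluster up cluster.yaml
# an environment named "my-cluster" is created/updated and set as the default

# or override the environment name
cortex cluster up cluster.yaml --configure-env prod
cortex env list
```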

2 changes: 1 addition & 1 deletion docs/workloads/batch/example.md
@@ -93,7 +93,7 @@ cortex get image-classifier
import cortex
import requests

cx = cortex.client("aws")
cx = cortex.client("cortex")
batch_endpoint = cx.get_api("image-classifier")["endpoint"]

dest_s3_dir = # specify S3 directory for the results, e.g. "s3://my-bucket/dir" (make sure your cluster has access to this bucket)
2 changes: 1 addition & 1 deletion docs/workloads/dependencies/example.md
@@ -40,7 +40,7 @@ api_spec = {
}
}

cx = cortex.client("aws")
cx = cortex.client("cortex")
cx.deploy(api_spec, project_dir=".")
```

8 changes: 4 additions & 4 deletions docs/workloads/realtime/metrics.md
@@ -6,10 +6,10 @@ response code counts (summed over the past 2 weeks) for your APIs:
```bash
cortex get

env api status up-to-date requested last update avg request 2XX
aws iris-classifier live 1 1 17m 24ms 1223
aws text-generator live 1 1 8m 180ms 433
aws image-classifier-resnet50 live 2 2 1h 32ms 1121126
env api status up-to-date requested last update avg request 2XX
cortex iris-classifier live 1 1 17m 24ms 1223
cortex text-generator live 1 1 8m 180ms 433
cortex image-classifier-resnet50 live 2 2 1h 32ms 1121126
```

The `cortex get API_NAME` command also provides a link to a Grafana dashboard:
2 changes: 1 addition & 1 deletion docs/workloads/realtime/multi-model/example.md
@@ -32,7 +32,7 @@ requirements = ["tensorflow", "transformers", "wget", "fasttext"]

api_spec = {"name": "multi-model", "kind": "RealtimeAPI"}

cx = cortex.client("aws")
cx = cortex.client("cortex")
cx.deploy_realtime_api(api_spec, handler=Handler, requirements=requirements)
```

2 changes: 1 addition & 1 deletion docs/workloads/realtime/traffic-splitter/example.md
@@ -33,7 +33,7 @@ api_spec_gpu = {
},
}

cx = cortex.client("aws")
cx = cortex.client("cortex")
cx.deploy_realtime_api(api_spec_cpu, handler=Handler, requirements=requirements)
cx.deploy_realtime_api(api_spec_gpu, handler=Handler, requirements=requirements)
```
2 changes: 1 addition & 1 deletion docs/workloads/task/example.md
@@ -90,7 +90,7 @@ Or, using Python `requests`:
import cortex
import requests

cx = cortex.client("aws") # "aws" is the name of the Cortex environment used in this example
cx = cortex.client("cortex") # "cortex" is the name of the Cortex environment used in this example
task_endpoint = cx.get_api("train-iris")["endpoint"]

dest_s3_dir = # S3 directory where the model will be uploaded, e.g. "s3://my-bucket/dir"
20 changes: 9 additions & 11 deletions test/apis/batch/image-classifier/README.md
@@ -2,8 +2,6 @@

This example shows how to deploy a batch image classification api that accepts a list of image urls as input, downloads the images, classifies them, and writes the results to S3.

**Batch APIs are only supported in AWS.** You can find cluster installation documentation [here](../../../docs/aws/install.md).

## Pre-requisites

* [Install](../../../docs/aws/install.md) Cortex and create a cluster
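
Taken together, the workflow in the rest of this README is roughly the following sketch (`<JOB_ID>` is a placeholder; the full commands, request payloads, and outputs are shown in the sections below):

```bash
cortex deploy                          # create the image-classifier BatchAPI
cortex get image-classifier            # look up the endpoint and submitted jobs
# submit a job to the endpoint (shown below), note the job id in the response, then:
cortex get image-classifier <JOB_ID>   # check the job's status
cortex logs image-classifier <JOB_ID>  # stream the job's logs
cortex delete image-classifier         # delete the API when finished
```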
@@ -147,15 +145,15 @@ Here are the complete [API configuration docs](../../../docs/workloads/batch/con
`cortex deploy` takes your model, your `handler.py` implementation, and your configuration from `cortex.yaml` and creates an endpoint that can receive job submissions and manage running jobs.

```bash
$ cortex deploy --env aws
$ cortex deploy

created image-classifier (BatchAPI)
```

Get the endpoint for your Batch API with `cortex get image-classifier`:

```bash
$ cortex get image-classifier --env aws
$ cortex get image-classifier

no submitted jobs

@@ -218,7 +216,7 @@ Take note of the job id in the response.
### List the jobs for your Batch API

```bash
$ cortex get image-classifier --env aws
$ cortex get image-classifier

job id status progress start time duration
69d6faf82e4660d3 running 0/3 20 Jul 2020 01:07:44 UTC 3m26s
@@ -248,7 +246,7 @@ $ curl http://***.elb.us-west-2.amazonaws.com/image-classifier?jobID=69d6faf82e4
You can also use the Cortex CLI to get the status of your job using `cortex get <BATCH_API_NAME> <JOB_ID>`.

```bash
$ cortex get image-classifier 69d6faf82e4660d3 --env aws
$ cortex get image-classifier 69d6faf82e4660d3

job id: 69d6faf82e4660d3
status: running
@@ -273,7 +271,7 @@ job endpoint: http://***.elb.us-west-2.amazonaws.com/image-classifier/69d6faf82e
You can stream logs realtime for debugging and monitoring purposes with `cortex logs <BATCH_API_NAME> <JOB_ID>`

```bash
$ cortex logs image-classifier 69d6fdeb2d8e6647 --env aws
$ cortex logs image-classifier 69d6fdeb2d8e6647

started enqueuing batches to queue
partitioning 5 items found in job submission into 3 batches of size 2
@@ -398,7 +396,7 @@ After submitting this job, you should get a response like this:
Wait for the job to complete by streaming the logs with `cortex logs <BATCH_API_NAME> <JOB_ID>` or watching for the job status to change with `cortex get <BATCH_API_NAME> <JOB_ID> --watch`.

```bash
$ cortex logs image-classifier 69d6faf82e4660d3 --env aws
$ cortex logs image-classifier 69d6faf82e4660d3

started enqueuing batches to queue
enqueuing contents from file s3://cortex-examples/image-classifier/urls_0.json
@@ -507,7 +505,7 @@ You should get a response like this:
Wait for the job to complete by streaming the logs with `cortex logs <BATCH_API_NAME> <JOB_ID>` or watching for the job status to change with `cortex get <BATCH_API_NAME> <JOB_ID> --watch`.

```bash
$ cortex logs image-classifier 69d6f8a472f0e1e5 --env aws
$ cortex logs image-classifier 69d6f8a472f0e1e5

started enqueuing batches to queue
completed enqueuing a total of 8 batches
@@ -550,7 +548,7 @@ stopped job 69d96a01ea55da8c
You can also use the Cortex CLI `cortex delete <BATCH_API_NAME> <JOB_ID>`.

```bash
$ cortex delete image-classifier 69d96a01ea55da8c --env aws
$ cortex delete image-classifier 69d96a01ea55da8c

stopped job 69d96a01ea55da8c
```
@@ -562,7 +560,7 @@ stopped job 69d96a01ea55da8c
Run `cortex delete` to delete the API:

```bash
$ cortex delete image-classifier --env aws
$ cortex delete image-classifier

deleting image-classifier
```
2 changes: 1 addition & 1 deletion test/apis/grpc/iris-classifier-sklearn/README.md
@@ -21,7 +21,7 @@ sample = iris_classifier_pb2.Sample(
petal_width=0.3
)

cx = cortex.client("aws")
cx = cortex.client("cortex")
api = cx.get_api("iris-classifier")
grpc_endpoint = api["endpoint"] + ":" + str(api["grpc_ports"]["insecure"])
channel = grpc.insecure_channel(grpc_endpoint)
2 changes: 1 addition & 1 deletion test/apis/grpc/prime-number-generator/README.md
@@ -14,7 +14,7 @@ import cortex
import generator_pb2
import generator_pb2_grpc

cx = cortex.client("aws")
cx = cortex.client("cortex")
api = cx.get_api("prime-generator")
grpc_endpoint = api["endpoint"] + ":" + str(api["grpc_ports"]["insecure"])

4 changes: 0 additions & 4 deletions test/apis/model-caching/python/translator/README.md
@@ -57,17 +57,13 @@ Once the cluster is spun up (roughly 20 minutes), we can deploy by running:
cortex deploy
```

(I've configured my CLI to default to the AWS environment by running `cortex env default aws`)

Now, we wait for the API to become live. You can track its status with `cortex get --watch`.

Note that after the API goes live, we may need to wait a few minutes for it to register all the models hosted in the S3 bucket. Because the bucket is so large, it takes Cortex a bit longer than usual. When it's done, running `cortex get translator` should return something like:

```
cortex get translator

using aws environment

status up-to-date requested last update avg request 2XX
live 1 1 3m -- --

23 changes: 10 additions & 13 deletions test/apis/traffic-splitter/README.md
@@ -11,7 +11,7 @@ To deploy this example:
## `cortex deploy`

```bash
$ cortex deploy --env aws
$ cortex deploy

creating iris-classifier-onnx (RealtimeAPI)
creating iris-classifier-tf (RealtimeAPI)
@@ -23,18 +23,18 @@ created iris-classifier (TrafficSplitter)
```bash
$ cortex get

env realtime api status up-to-date requested last update avg request 2XX
aws iris-classifier-onnx updating 0 1 27s - -
aws iris-classifier-tf updating 0 1 27s - -
env realtime api status up-to-date requested last update avg request 2XX
cortex iris-classifier-onnx updating 0 1 27s - -
cortex iris-classifier-tf updating 0 1 27s - -

env traffic splitter apis last update
aws iris-classifier iris-classifier-onnx:30 iris-classifier-tf:70 27s
cortex iris-classifier iris-classifier-onnx:30 iris-classifier-tf:70 27s
```

## `cortex get iris-classifier`

```bash
$ cortex get iris-classifier --env aws
$ cortex get iris-classifier

apis weights status requested last update avg request 2XX 5XX
iris-classifier-onnx 30 live 1 1m - - -
@@ -73,10 +73,7 @@ setosa
Notice the requests being routed to the different Realtime APIs based on their weights (the output below may not match yours):

```bash
$ cortex get iris-classifier --env aws

using aws environment

$ cortex get iris-classifier

apis weights status requested last update avg request 2XX 5XX
iris-classifier-onnx 30 live 1 4m 6.00791 ms 1 -
@@ -93,15 +90,15 @@ example curl: curl http://***.elb.us-west-2.amazonaws.com/iris-classifier -X POS
Use `cortex delete <api_name>` to delete the Traffic Splitter and the two Realtime APIs (note that the Traffic Splitter and each Realtime API must be deleted by separate `cortex delete` commands):

```bash
$ cortex delete iris-classifier --env aws
$ cortex delete iris-classifier

deleting iris-classifier

$ cortex delete iris-classifier-onnx --env aws
$ cortex delete iris-classifier-onnx

deleting iris-classifier-onnx

$ cortex delete iris-classifier-tf --env aws
$ cortex delete iris-classifier-tf

deleting iris-classifier-tf
```