Commit 0a1e2c2

CaaS - test APIs and E2E tests (#2191)
1 parent 079c20c commit 0a1e2c2

340 files changed: +1391 -10544 lines

Makefile

Lines changed: 7 additions & 9 deletions
@@ -124,8 +124,7 @@ async-gateway-update:
 	@./dev/registry.sh update-single async-gateway
 	@kubectl delete pods -l cortex.dev/async=gateway --namespace=default
 
-# Docker images
-
+# docker images
 images-all:
 	@./dev/registry.sh update all
 images-all-skip-push:
@@ -136,15 +135,8 @@ images-dev:
 images-dev-skip-push:
 	@./dev/registry.sh update dev --skip-push
 
-images-api:
-	@./dev/registry.sh update api
-images-api-skip-push:
-	@./dev/registry.sh update api --skip-push
-
 images-manager-skip-push:
 	@./dev/registry.sh update-single manager --skip-push
-images-iris:
-	@./dev/registry.sh update-single python-handler-cpu
 
 registry-create:
 	@./dev/registry.sh create
@@ -179,6 +171,12 @@ test-go:
 test-python:
 	@./build/test.sh python
 
+
+# build test api images
+# the DOCKER_PASSWORD and DOCKER_USERNAME vars to the quay repo are required
+build-and-push-test-images:
+	@./test/utils/build-and-push-images.sh quay.io
+
 # run e2e tests on an existing cluster
 # read test/e2e/README.md for instructions first
 test-e2e:
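
For reference, a minimal sketch of invoking the new target; the credential values are placeholders for your own quay.io account or robot token, which the comment above says must be present in the environment:

export DOCKER_USERNAME=<quay-username>           # placeholder
export DOCKER_PASSWORD=<quay-password-or-token>  # placeholder
make build-and-push-test-images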

build/build-image.sh

Lines changed: 1 addition & 13 deletions
@@ -26,16 +26,4 @@ image=$1
 if [ "$image" == "inferentia" ]; then
   aws ecr get-login-password --region us-west-2 | docker login --username AWS --password-stdin 790709498068.dkr.ecr.us-west-2.amazonaws.com
 fi
-
-build_args=""
-
-if [ "${image}" == "python-handler-gpu" ]; then
-  cuda=("10.0" "10.1" "10.1" "10.2" "10.2" "11.0" "11.1")
-  cudnn=("7" "7" "8" "7" "8" "8" "8")
-  for i in ${!cudnn[@]}; do
-    build_args="${build_args} --build-arg CUDA_VERSION=${cuda[$i]} --build-arg CUDNN=${cudnn[$i]}"
-    docker build "$ROOT" -f $ROOT/images/$image/Dockerfile $build_args -t quay.io/cortexlabs/${image}:${CORTEX_VERSION}-cuda${cuda[$i]}-cudnn${cudnn[$i]} -t cortexlabs/${image}:${CORTEX_VERSION}-cuda${cuda[$i]}-cudnn${cudnn[$i]}
-  done
-else
-  docker build "$ROOT" -f $ROOT/images/$image/Dockerfile $build_args -t quay.io/cortexlabs/${image}:${CORTEX_VERSION} -t cortexlabs/${image}:${CORTEX_VERSION}
-fi
+docker build "$ROOT" -f $ROOT/images/$image/Dockerfile -t quay.io/cortexlabs/${image}:${CORTEX_VERSION} -t cortexlabs/${image}:${CORTEX_VERSION}
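
With the CUDA/cuDNN matrix gone, the script now runs a single docker build per image. A hedged invocation sketch; the image name is chosen only for illustration, and CORTEX_VERSION is assumed to be resolved by the script or exported beforehand:

./build/build-image.sh operator    # tags quay.io/cortexlabs/operator:${CORTEX_VERSION} locally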

build/images.sh

Lines changed: 0 additions & 8 deletions
@@ -19,13 +19,6 @@
 
 set -euo pipefail
 
-api_images=(
-  "python-handler-cpu"
-  "python-handler-gpu"
-  "tensorflow-handler"
-  "python-handler-inf"
-)
-
 dev_images=(
   "manager"
   "proxy"
@@ -61,7 +54,6 @@ non_dev_images=(
 )
 
 all_images=(
-  "${api_images[@]}"
   "${dev_images[@]}"
   "${non_dev_images[@]}"
 )

build/push-image.sh

Lines changed: 1 addition & 10 deletions
@@ -23,13 +23,4 @@ host=$1
 image=$2
 
 echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin
-
-if [ "$image" == "python-handler-gpu" ]; then
-  cuda=("10.0" "10.1" "10.1" "10.2" "10.2" "11.0" "11.1")
-  cudnn=("7" "7" "8" "7" "8" "8" "8")
-  for i in ${!cudnn[@]}; do
-    docker push $host/cortexlabs/${image}:${CORTEX_VERSION}-cuda${cuda[$i]}-cudnn${cudnn[$i]}
-  done
-else
-  docker push $host/cortexlabs/${image}:${CORTEX_VERSION}
-fi
+docker push $host/cortexlabs/${image}:${CORTEX_VERSION}
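
Pushing follows the same simplification: one docker push per image. A hedged sketch, with host and image as the two positional arguments shown above and the quay credentials expected in the environment (the image name is only an example):

export DOCKER_USERNAME=<quay-username>           # placeholder
export DOCKER_PASSWORD=<quay-password-or-token>  # placeholder
./build/push-image.sh quay.io operator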

dev/registry.sh

Lines changed: 1 addition & 9 deletions
@@ -108,15 +108,13 @@ function build() {
   local tag=$2
   local dir="${ROOT}/images/${image}"
 
-  build_args=""
-
   tag_args=""
   if [ -n "$AWS_ACCOUNT_ID" ] && [ -n "$AWS_REGION" ]; then
     tag_args+=" -t $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/cortexlabs/$image:$tag"
   fi
 
   blue_echo "Building $image:$tag..."
-  docker build $ROOT -f $dir/Dockerfile -t cortexlabs/$image:$tag $tag_args $build_args
+  docker build $ROOT -f $dir/Dockerfile -t cortexlabs/$image:$tag $tag_args
   green_echo "Built $image:$tag\n"
 }
 
@@ -150,10 +148,6 @@ function build_and_push() {
   set -euo pipefail # necessary since this is called in a new shell by parallel
 
   tag=$CORTEX_VERSION
-  if [ "${image}" == "python-handler-gpu" ]; then
-    tag="${CORTEX_VERSION}-cuda10.2-cudnn8"
-  fi
-
   build $image $tag
   push $image $tag
 }
@@ -240,8 +234,6 @@ elif [ "$cmd" = "update" ]; then
     images_to_build+=( "${dev_images[@]}" )
   fi
 
-  images_to_build+=( "${api_images[@]}" )
-
  if [[ " ${images_to_build[@]} " =~ " operator " ]]; then
    cache_builder operator
  fi
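
With the api_images group removed, the registry script is driven only by the dev and non-dev image lists; the Makefile recipes above already show the supported entry points, for example:

./dev/registry.sh update all                          # build and push every remaining image
./dev/registry.sh update dev --skip-push              # dev images only, without pushing
./dev/registry.sh update-single manager --skip-push   # a single image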

Deleted files:

test/apis/README.md (67 lines removed)
test/apis/async/iris-classifier/cortex.yaml (11 lines removed)
test/apis/async/iris-classifier/expectations.yaml (10 lines removed)
test/apis/async/iris-classifier/handler.py (26 lines removed)
test/apis/async/iris-classifier/requirements.txt (2 lines removed)
test/apis/async/iris-classifier/sample.json (6 lines removed)
test/apis/async/tensorflow/cortex.yaml (7 lines removed)
test/apis/async/tensorflow/handler.py (11 lines removed)
test/apis/async/tensorflow/sample.json (6 lines removed)

Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
+*.dockerfile
+README.md
+sample.json
+expectations.yaml
+*.pyc
+*.pyo
+*.pyd
+__pycache__
+.pytest_cache

Lines changed: 16 additions & 0 deletions
@@ -0,0 +1,16 @@
+- name: text-generator
+  kind: AsyncAPI
+  pod:
+    port: 9000
+    containers:
+      - name: api
+        image: quay.io/cortexlabs-test/async-text-generator-cpu:latest
+        readiness_probe:
+          http_get:
+            path: "/healthz"
+            port: 9000
+        compute:
+          cpu: 1
+          mem: 2.5G
+  autoscaling:
+    max_concurrency: 1

Lines changed: 19 additions & 0 deletions
@@ -0,0 +1,19 @@
+- name: text-generator
+  kind: AsyncAPI
+  pod:
+    port: 9000
+    containers:
+      - name: api
+        image: quay.io/cortexlabs-test/async-text-generator-gpu:latest
+        env:
+          TARGET_DEVICE: "cuda"
+        readiness_probe:
+          http_get:
+            path: "/healthz"
+            port: 9000
+        compute:
+          cpu: 1
+          gpu: 1
+          mem: 512M
+  autoscaling:
+    max_concurrency: 1
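
The two new AsyncAPI configurations above (their file paths are omitted in this rendering) deploy CPU and GPU variants of the text generator. A hedged sketch of exercising a deployed variant, assuming Cortex's usual submit-then-poll AsyncAPI flow and that $ENDPOINT holds the URL reported by "cortex get text-generator":

# submit a workload; the returned id field is assumed per the AsyncAPI contract
request_id=$(curl -s -X POST "$ENDPOINT" -H "Content-Type: application/json" -d @sample.json | jq -r .id)
# fetch the result once the workload has been processed
curl -s "$ENDPOINT/$request_id"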

test/apis/async/tensorflow/expectations.yaml renamed to test/apis/async/text-generator/expectations.yaml

Lines changed: 2 additions & 3 deletions
@@ -3,8 +3,7 @@ response:
   json_schema:
     type: "object"
     properties:
-      label:
+      prediction:
         type: "string"
-        const: "setosa"
     required:
-      - "label"
+      - "prediction"
Lines changed: 42 additions & 0 deletions
@@ -0,0 +1,42 @@
+import os
+
+from fastapi import FastAPI, Response, status
+from pydantic import BaseModel
+from transformers import GPT2Tokenizer, GPT2LMHeadModel
+
+
+class Request(BaseModel):
+    text: str
+
+
+state = {
+    "model_ready": False,
+    "tokenizer": None,
+    "model": None,
+}
+device = os.getenv("TARGET_DEVICE", "cpu")
+app = FastAPI()
+
+
+@app.on_event("startup")
+def startup():
+    global state
+    state["tokenizer"] = GPT2Tokenizer.from_pretrained("gpt2")
+    state["model"] = GPT2LMHeadModel.from_pretrained("gpt2").to(device)
+    state["model_ready"] = True
+
+
+@app.get("/healthz")
+def healthz(response: Response):
+    if not state["model_ready"]:
+        response.status_code = status.HTTP_503_SERVICE_UNAVAILABLE
+
+
+@app.post("/")
+def text_generator(request: Request):
+    input_length = len(request.text.split())
+    tokens = state["tokenizer"].encode(request.text, return_tensors="pt").to(device)
+    prediction = state["model"].generate(tokens, max_length=input_length + 20, do_sample=True)
+    return {
+        "prediction": state["tokenizer"].decode(prediction[0]),
+    }

Lines changed: 3 additions & 0 deletions
@@ -0,0 +1,3 @@
+{
+    "text": "machine learning is"
+}
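
Together, the handler and sample payload above allow a quick smoke test outside the cluster; a hedged sketch assuming the handler file is importable as main (its path is omitted in this rendering) and that fastapi, uvicorn, pydantic, transformers, and torch are installed:

uvicorn main:app --port 9000 &                    # module name is an assumption
curl -i localhost:9000/healthz                    # 503 until the GPT-2 weights load, then 200
curl -X POST localhost:9000/ -H "Content-Type: application/json" -d @sample.json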
