# trtllm-meta-llama--Llama-2-70b-chat-hf.yaml
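#
# RayLLM model config: serves meta-llama/Llama-2-70b-chat-hf through the
# TensorRT-LLM engine, with Ray Serve autoscaling between 1 and 8 replicas.
#
# A typical launch, assuming the rayllm CLI from ray-project/ray-llm is
# installed and this file is saved under ./models/ (the path is illustrative):
#   rayllm run --model=./models/trtllm-meta-llama--Llama-2-70b-chat-hf.yaml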
deployment_config:
  autoscaling_config:
    min_replicas: 1
    initial_replicas: 1
    max_replicas: 8
    target_num_ongoing_requests_per_replica: 24
    metrics_interval_s: 10.0
    look_back_period_s: 30.0
    smoothing_factor: 0.6
    downscale_delay_s: 300.0
    upscale_delay_s: 15.0
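  # Worked example of the autoscaling target: Ray Serve tries to keep each
  # replica at ~24 ongoing requests, so a sustained load of 120 in-flight
  # requests settles at ceil(120 / 24) = 5 replicas (clamped to
  # min_replicas/max_replicas). The smoothing_factor of 0.6 roughly damps
  # each scaling step to 60% of the computed change.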
  max_concurrent_queries: 64
  ray_actor_options:
    resources:
      accelerator_type_a10: 0.01
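  # Note (an assumption about intent, following the pattern in other ray-llm
  # configs): the fractional accelerator_type_a10 custom resource only steers
  # the Serve replica onto a node that advertises A10 GPUs; the GPUs
  # themselves are claimed by the workers in scaling_config below.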
engine_config:
  model_id: meta-llama/Llama-2-70b-chat-hf
  type: TRTLLMEngine
  s3_mirror_config:
    bucket_uri: s3://trtllm-models/llama2-70b-tp2/ # Change to your own model S3 path
  # To test with a local copy instead, comment out the s3_mirror_config
  # section above and add the following:
  # model_local_path: <your local path>
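  # The bucket is expected to hold the prebuilt TensorRT-LLM engine artifacts
  # for this model (an assumption based on the engine type above; adjust to
  # your own layout). For example, a local build can be uploaded with the
  # standard AWS CLI (the local directory name is illustrative):
  #   aws s3 sync ./llama2-70b-engine/ s3://trtllm-models/llama2-70b-tp2/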
  generation:
    prompt_format:
      system: "<<SYS>>\n{instruction}\n<</SYS>>\n\n"
      assistant: " {instruction} </s><s>"
      trailing_assistant: ""
      user: "[INST] {system}{instruction} [/INST]"
      system_in_user: true
      default_system_message: ""
    stopping_sequences: ["<unk>"]
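    # How the template assembles a prompt: with system_in_user: true, the
    # rendered system block is substituted into {system} of the user template,
    # so a request with system message S and user message U becomes:
    #   [INST] <<SYS>>\nS\n<</SYS>>\n\nU [/INST]
    # With the empty default_system_message, a request without a system
    # message reduces to "[INST] U [/INST]".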
scaling_config:
  num_workers: 8 # MPI world size; must match the parallelism the engine was built with
  num_gpus_per_worker: 1
  num_cpus_per_worker: 8
  placement_strategy: "STRICT_PACK"
  resources_per_worker:
    accelerator_type_a10: 0.01
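# Capacity arithmetic implied above: STRICT_PACK places all 8 workers of a
# replica on a single node, so each replica needs one node with at least
# 8 GPUs and 8 * 8 = 64 CPUs; at max_replicas: 8 the cluster must supply up
# to 8 such nodes.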