# text_examples.py (forked from build-on-aws/amazon-bedrock-quick-start)
import boto3
import json

# Set up the Bedrock runtime client
bedrock_runtime = boto3.client(
    service_name="bedrock-runtime",
    region_name="us-east-1",
)
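
# Note: this assumes AWS credentials are configured (e.g. via the AWS CLI) and
# that access to the AI21, Anthropic, and Cohere models used below has been
# granted in the Bedrock console for us-east-1.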


def claude_prompt_format(prompt: str) -> str:
    # Claude v2's text-completion API expects the prompt to be wrapped in
    # "\n\nHuman:" and "\n\nAssistant:" turn markers
    return "\n\nHuman: " + prompt + "\n\nAssistant:"


# Call the AI21 Labs Jurassic-2 Mid model
def run_mid(prompt):
    prompt_config = {
        "prompt": prompt,
        "maxTokens": 5147,
        "temperature": 0.7,
        "stopSequences": [],
    }

    body = json.dumps(prompt_config)
    modelId = "ai21.j2-mid"
    accept = "application/json"
    contentType = "application/json"

    response = bedrock_runtime.invoke_model(
        body=body, modelId=modelId, accept=accept, contentType=contentType
    )
    response_body = json.loads(response.get("body").read())

    results = response_body.get("completions")[0].get("data").get("text")
    return results


# Call the Anthropic Claude v2 model
def call_claude(prompt):
    prompt_config = {
        "prompt": claude_prompt_format(prompt),
        "max_tokens_to_sample": 4096,
        "temperature": 0.5,
        "top_k": 250,
        "top_p": 0.5,
        "stop_sequences": [],
    }

    body = json.dumps(prompt_config)
    modelId = "anthropic.claude-v2"
    accept = "application/json"
    contentType = "application/json"

    response = bedrock_runtime.invoke_model(
        body=body, modelId=modelId, accept=accept, contentType=contentType
    )
    response_body = json.loads(response.get("body").read())

    results = response_body.get("completion")
    return results
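

# Not part of the original example: a minimal streaming sketch for long Claude
# completions using bedrock-runtime's invoke_model_with_response_stream API.
# It assumes the same anthropic.claude-v2 model and request shape as
# call_claude above; each stream event carries a JSON chunk whose "completion"
# field holds the next piece of generated text.
def call_claude_streaming(prompt):
    body = json.dumps(
        {
            "prompt": claude_prompt_format(prompt),
            "max_tokens_to_sample": 4096,
            "temperature": 0.5,
        }
    )
    response = bedrock_runtime.invoke_model_with_response_stream(
        body=body, modelId="anthropic.claude-v2"
    )
    for event in response.get("body"):
        chunk = event.get("chunk")
        if chunk:
            yield json.loads(chunk.get("bytes").decode())["completion"]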


# Call the Cohere Command model
def call_cohere(prompt):
    prompt_config = {
        "prompt": prompt,
        "max_tokens": 2048,
        "temperature": 0.7,
        # "return_likelihood": "GENERATION"
    }

    body = json.dumps(prompt_config)
    modelId = "cohere.command-text-v14"
    accept = "application/json"
    contentType = "application/json"

    response = bedrock_runtime.invoke_model(
        body=body, modelId=modelId, accept=accept, contentType=contentType
    )
    response_body = json.loads(response.get("body").read())

    results = response_body.get("generations")[0].get("text")
    return results
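

# Also not in the original example: a hedged retry helper for throttled calls.
# It assumes botocore's ClientError exposes the error code at
# err.response["Error"]["Code"]; the backoff parameters are illustrative.
# Usage: invoke_with_retry(call_claude, "Hello")
def invoke_with_retry(model_fn, prompt, retries=3):
    import time

    from botocore.exceptions import ClientError

    for attempt in range(retries):
        try:
            return model_fn(prompt)
        except ClientError as err:
            if (
                err.response["Error"]["Code"] == "ThrottlingException"
                and attempt < retries - 1
            ):
                time.sleep(2**attempt)  # simple exponential backoff
                continue
            raise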


def summarize_text(text):
    """
    Summarize the given text using a generative AI model.
    """
    prompt = f"Summarize the following text: {text}"
    result = run_mid(prompt)  # uses the AI21 helper defined above
    return result


def generate_code():
    """
    Generate Python code for uploading a file to Amazon S3.
    """
    prompt = "Write a Python function that uploads a file to Amazon S3"
    result = call_claude(prompt)  # uses the Claude helper defined above
    return result


def perform_qa(text):
    """
    Answer a question based on the provided text.
    """
    prompt = (
        f"How many models does Amazon Bedrock support given the following text: {text}"
    )
    result = call_cohere(prompt)  # uses the Cohere helper defined above
    return result


if __name__ == "__main__":
    # Sample text for summarization
    text = "This April, we announced Amazon Bedrock as part of a set of new tools for building with generative AI on AWS. Amazon Bedrock is a fully managed service that offers a choice of high-performing foundation models (FMs) from leading AI companies, including AI21 Labs, Anthropic, Cohere, Stability AI, and Amazon, along with a broad set of capabilities to build generative AI applications, simplifying the development while maintaining privacy and security. Today, I'm happy to announce that Amazon Bedrock is now generally available! I'm also excited to share that Meta's Llama 2 13B and 70B parameter models will soon be available on Amazon Bedrock."

    print("\n=== Summarization Example ===")
    summary = summarize_text(text)
    print(f"Summary: {summary}")

    print("\n=== Code Generation Example ===")
    code_snippet = generate_code()
    print(f"Generated Code:\n{code_snippet}")

    print("\n=== Q&A Example ===")
    answer = perform_qa(text)
    print(f"Answer: {answer}")