Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chore: Ran nox -s blacken on samples #10231

Merged
merged 6 commits into from
Jun 15, 2023
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
🦉 Updates from OwlBot post-processor
  • Loading branch information
gcf-owl-bot[bot] committed Jun 14, 2023
commit b32c5135f30dcecef7c997c944d489d94228994c
12 changes: 6 additions & 6 deletions generative_ai/code_chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,23 +16,23 @@
from vertexai.preview.language_models import CodeChatModel


def write_a_function(temperature: float = 0.5) -> object:
    """Example of using Code Chat Model to write a function.

    Args:
        temperature: Degree of randomness in token selection (0 is deterministic).

    Returns:
        The model's chat response object; `response.text` holds the generated code.
    """

    # TODO developer - override these parameters as needed:
    parameters = {
        "temperature": temperature,  # Temperature controls the degree of randomness in token selection.
        "max_output_tokens": 1024,  # Token limit determines the maximum amount of text output.
    }

    code_chat_model = CodeChatModel.from_pretrained("codechat-bison@001")
    chat = code_chat_model.start_chat()

    response = chat.send_message(
        "Please help write a function to calculate the min of two numbers", **parameters
    )
    print(f"Response from Model: {response.text}")
    # [END aiplatform_sdk_code_chat]

    return response

Expand Down
2 changes: 1 addition & 1 deletion generative_ai/code_chat_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,4 +21,4 @@
@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10)
def test_code_chat() -> None:
    """Smoke-test the code-chat sample; retries on quota exhaustion."""
    content = code_chat.write_a_function(temperature=0).text
    assert "def min(a, b):" in content
12 changes: 6 additions & 6 deletions generative_ai/code_completion_function.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,22 +16,22 @@
from vertexai.preview.language_models import CodeGenerationModel


def complete_code_function(temperature: float = 0.2) -> object:
    """Example of using Code Completion to complete a function.

    Args:
        temperature: Degree of randomness in token selection (0 is deterministic).

    Returns:
        The model's prediction object; `response.text` holds the completion.
    """

    # TODO developer - override these parameters as needed:
    parameters = {
        "temperature": temperature,  # Temperature controls the degree of randomness in token selection.
        "max_output_tokens": 64,  # Token limit determines the maximum amount of text output.
    }

    code_completion_model = CodeGenerationModel.from_pretrained("code-gecko@001")
    response = code_completion_model.predict(
        prefix="def reverse_string(s):", **parameters
    )

    print(f"Response from Model: {response.text}")
    # [END aiplatform_sdk_code_completion_comment]

    return response

Expand Down
2 changes: 1 addition & 1 deletion generative_ai/code_completion_function_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,4 +21,4 @@
@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10)
def test_code_completion_comment() -> None:
    """Smoke-test the code-completion sample; retries on quota exhaustion."""
    content = code_completion_function.complete_code_function(temperature=0).text
    assert "def" in content
12 changes: 6 additions & 6 deletions generative_ai/code_completion_test_function.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,26 +16,26 @@
from vertexai.preview.language_models import CodeGenerationModel


def complete_test_function(temperature: float = 0.2) -> object:
    """Example of using Code Completion to complete a test function.

    Args:
        temperature: Degree of randomness in token selection (0 is deterministic).

    Returns:
        The model's prediction object; `response.text` holds the completion.
    """

    # TODO developer - override these parameters as needed:
    parameters = {
        "temperature": temperature,  # Temperature controls the degree of randomness in token selection.
        "max_output_tokens": 64,  # Token limit determines the maximum amount of text output.
    }

    code_completion_model = CodeGenerationModel.from_pretrained("code-gecko@001")
    # NOTE(review): the internal indentation of this prompt string was lost in the
    # diff view; `return s[::-1]` is assumed to be indented one level — verify
    # against the repository file.
    response = code_completion_model.predict(
        prefix="""def reverse_string(s):
    return s[::-1]
def test_empty_input_string()""",
        **parameters,
    )

    print(f"Response from Model: {response.text}")

    # [END aiplatform_sdk_code_completion_test_function]

    return response

Expand Down
2 changes: 1 addition & 1 deletion generative_ai/code_completion_test_function_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,4 +21,4 @@
@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10)
def test_code_completion_test_function() -> None:
    """Smoke-test the test-completion sample; retries on quota exhaustion."""
    content = code_completion_test_function.complete_test_function(temperature=0).text
    assert "-> None:" in content
12 changes: 6 additions & 6 deletions generative_ai/code_generation_function.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,22 +16,22 @@
from vertexai.preview.language_models import CodeGenerationModel


def generate_a_function(temperature: float = 0.5) -> object:
    """Example of using Code Generation to write a function.

    Args:
        temperature: Degree of randomness in token selection (0 is deterministic).

    Returns:
        The model's prediction object; `response.text` holds the generated code.
    """

    # TODO developer - override these parameters as needed:
    parameters = {
        "temperature": temperature,  # Temperature controls the degree of randomness in token selection.
        "max_output_tokens": 256,  # Token limit determines the maximum amount of text output.
    }

    code_generation_model = CodeGenerationModel.from_pretrained("code-bison@001")
    response = code_generation_model.predict(
        prefix="Write a function that checks if a year is a leap year.", **parameters
    )

    print(f"Response from Model: {response.text}")
    # [END aiplatform_sdk_code_generation_function]

    return response

Expand Down
2 changes: 1 addition & 1 deletion generative_ai/code_generation_function_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,4 +21,4 @@
@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10)
def test_code_generation_function() -> None:
    """Smoke-test the code-generation sample; retries on quota exhaustion."""
    content = code_generation_function.generate_a_function(temperature=0).text
    assert "Divide the year by 4." in content
17 changes: 10 additions & 7 deletions generative_ai/code_generation_unittest.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,19 +18,19 @@
from vertexai.preview.language_models import CodeGenerationModel


def generate_unittest(
temperature: float = 0.5
) -> object:
def generate_unittest(temperature: float = 0.5) -> object:
"""Example of using Code Generation to write a unit test."""

# TODO developer - override these parameters as needed:
parameters = {
"temperature": temperature, # Temperature controls the degree of randomness in token selection.
"max_output_tokens": 256, # Token limit determines the maximum amount of text output.
"max_output_tokens": 256, # Token limit determines the maximum amount of text output.
}

code_generation_model = CodeGenerationModel.from_pretrained("code-bison@001")
response = code_generation_model.predict(prefix=textwrap.dedent("""\
response = code_generation_model.predict(
prefix=textwrap.dedent(
"""\
Write a unit test for this function:
def is_leap_year(year):
if year % 4 == 0:
Expand All @@ -43,10 +43,13 @@ def is_leap_year(year):
return True
else:
return False
"""), **parameters)
"""
),
**parameters,
)

print(f"Response from Model: {response.text}")
# [END aiplatform_sdk_code_generation_unittest]
# [END aiplatform_sdk_code_generation_unittest]

return response

Expand Down
2 changes: 1 addition & 1 deletion generative_ai/code_generation_unittest_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,4 +21,4 @@
@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10)
def test_code_generation_unittest() -> None:
    """Smoke-test the unit-test-generation sample; retries on quota exhaustion."""
    content = code_generation_unittest.generate_unittest(temperature=0).text
    assert "def test_is_leap_year():" in content