Skip to content

Commit 7fc4a22

Browse files
authored
chore: fix sample and integration tests (googleapis#1432)
Removes the metadata integration tests that supported Preview experiment creation.
1 parent 54cfac6 commit 7fc4a22

File tree

11 files changed

+111
-115
lines changed

11 files changed

+111
-115
lines changed

.github/CODEOWNERS

Lines changed: 5 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -3,29 +3,10 @@
33
#
44
# For syntax help see:
55
# https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax
6+
# Note: This file is autogenerated. To make changes to the codeowner team, please update .repo-metadata.json.
67

7-
# @googleapis/cdpe-cloudai and yoshi-python are the default owners
8-
* @googleapis/cdpe-cloudai @googleapis/yoshi-python
8+
# @googleapis/yoshi-python is the default owner for changes in this repo
9+
* @googleapis/yoshi-python
910

10-
# The AI Platform GAPIC libraries are owned by Cloud AI DPE
11-
/google/cloud/aiplatform_*/** @googleapis/cdpe-cloudai
12-
13-
# The Vertex SDK is owned by Vertex SDK Dev team
14-
/google/cloud/aiplatform/** @googleapis/cloud-aiplatform-model-builder-sdk
15-
/tests/system/aiplatform/** @googleapis/cloud-aiplatform-model-builder-sdk
16-
/tests/unit/aiplatform/** @googleapis/cloud-aiplatform-model-builder-sdk
17-
18-
# The Cloud AI DPE team is the default owner for samples
19-
/samples/**/*.py @googleapis/cdpe-cloudai @googleapis/python-samples-reviewers
20-
/.sample_configs/** @googleapis/cdpe-cloudai
21-
22-
# The enhanced client library tests are owned by Cloud AI DPE
23-
/tests/unit/enhanced_library/*.py @googleapis/cdpe-cloudai
24-
25-
# Core library files owned by Cloud AI DPE and Vertex SDK Dev teams
26-
CHANGELOG.md @googleapis/cloud-aiplatform-model-builder-sdk @googleapis/cdpe-cloudai
27-
README.rst @googleapis/cloud-aiplatform-model-builder-sdk @googleapis/cdpe-cloudai
28-
setup.py @googleapis/cloud-aiplatform-model-builder-sdk @googleapis/cdpe-cloudai
29-
30-
# Vertex AI product team-specific ownership
31-
/google/cloud/aiplatform/constants/prediction.py @googleapis/vertex-prediction-team
11+
# @googleapis/python-samples-reviewers is the default owner for samples changes
12+
/samples/ @googleapis/python-samples-reviewers

.kokoro/samples/python3.7/periodic.cfg

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,5 +2,5 @@
22

33
env_vars: {
44
key: "INSTALL_LIBRARY_FROM_SOURCE"
5-
value: "False"
5+
value: "True"
66
}

.kokoro/samples/python3.8/periodic.cfg

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,5 +2,5 @@
22

33
env_vars: {
44
key: "INSTALL_LIBRARY_FROM_SOURCE"
5-
value: "False"
5+
value: "True"
66
}

google/cloud/aiplatform/metadata/experiment_run_resource.py

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -118,7 +118,12 @@ def __init__(
118118
credentials set in aiplatform.init.
119119
"""
120120

121-
self._experiment = self._get_experiment(experiment=experiment)
121+
self._experiment = self._get_experiment(
122+
experiment=experiment,
123+
project=project,
124+
location=location,
125+
credentials=credentials,
126+
)
122127
self._run_name = run_name
123128

124129
run_id = _format_experiment_run_resource_id(

google/cloud/aiplatform/metadata/resource.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -218,7 +218,7 @@ def _nested_update_metadata(
218218
if gca_resource.metadata:
219219
for key, value in metadata.items():
220220
# Note: This only support nested dictionaries one level deep
221-
if isinstance(value, collections.Mapping):
221+
if isinstance(value, collections.abc.Mapping):
222222
gca_resource.metadata[key].update(value)
223223
else:
224224
gca_resource.metadata[key] = value

owlbot.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -98,6 +98,13 @@
9898
".coveragerc",
9999
".kokoro/continuous/common.cfg",
100100
".kokoro/presubmit/presubmit.cfg",
101+
# exclude sample configs so periodic samples are tested against main
102+
# instead of pypi
103+
".kokoro/samples/python3.6/periodic.cfg",
104+
".kokoro/samples/python3.7/periodic.cfg",
105+
".kokoro/samples/python3.8/periodic.cfg",
106+
".kokoro/samples/python3.9/periodic.cfg",
107+
".kokoro/samples/python3.10/periodic.cfg",
101108
".github/CODEOWNERS",
102109
".github/workflows", # exclude gh actions as credentials are needed for tests
103110
],

samples/model-builder/experiment_tracking/assign_artifact_as_execution_input_sample.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,4 @@ def assign_artifact_as_execution_input_sample(
2121
artifact: aiplatform.Artifact,
2222
):
2323
execution.assign_input_artifacts([artifact])
24-
25-
2624
# [END aiplatform_sdk_assign_artifact_as_execution_input_sample]
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
google-cloud-aiplatform
1+
google-cloud-aiplatform

setup.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@
7272
testing_extra_require = (
7373
full_extra_require
7474
+ profiler_extra_require
75-
+ ["grpcio-testing", "pytest-xdist", "ipython"]
75+
+ ["grpcio-testing", "pytest-xdist", "ipython", "kfp"]
7676
)
7777

7878

tests/system/aiplatform/test_experiments.py

Lines changed: 88 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -46,8 +46,8 @@ class TestExperiments(e2e_base.TestEndToEnd):
4646
_temp_prefix = "tmpvrtxsdk-e2e"
4747

4848
def setup_class(cls):
49-
cls._experiment_name = cls._make_display_name("experiment")[:30]
50-
cls._dataset_artifact_name = cls._make_display_name("ds-artifact")[:30]
49+
cls._experiment_name = cls._make_display_name("")[:64]
50+
cls._dataset_artifact_name = cls._make_display_name("")[:64]
5151
cls._dataset_artifact_uri = cls._make_display_name("ds-uri")
5252
cls._pipeline_job_id = cls._make_display_name("job-id")
5353

@@ -74,29 +74,63 @@ def test_create_experiment(self, shared_state):
7474
)
7575

7676
def test_get_experiment(self):
77-
experiment = aiplatform.Experiment(experiment_name=self._experiment_name)
77+
experiment = aiplatform.Experiment(
78+
experiment_name=self._experiment_name,
79+
project=e2e_base._PROJECT,
80+
location=e2e_base._LOCATION,
81+
)
7882
assert experiment.name == self._experiment_name
7983

8084
def test_start_run(self):
85+
aiplatform.init(
86+
project=e2e_base._PROJECT,
87+
location=e2e_base._LOCATION,
88+
experiment=self._experiment_name,
89+
)
8190
run = aiplatform.start_run(_RUN)
8291
assert run.name == _RUN
8392

8493
def test_get_run(self):
85-
run = aiplatform.ExperimentRun(run_name=_RUN, experiment=self._experiment_name)
94+
run = aiplatform.ExperimentRun(
95+
run_name=_RUN,
96+
experiment=self._experiment_name,
97+
project=e2e_base._PROJECT,
98+
location=e2e_base._LOCATION,
99+
)
86100
assert run.name == _RUN
87101
assert run.state == aiplatform.gapic.Execution.State.RUNNING
88102

89103
def test_log_params(self):
104+
aiplatform.init(
105+
project=e2e_base._PROJECT,
106+
location=e2e_base._LOCATION,
107+
experiment=self._experiment_name,
108+
)
109+
aiplatform.start_run(_RUN, resume=True)
90110
aiplatform.log_params(_PARAMS)
91111
run = aiplatform.ExperimentRun(run_name=_RUN, experiment=self._experiment_name)
92112
assert run.get_params() == _PARAMS
93113

94114
def test_log_metrics(self):
115+
aiplatform.init(
116+
project=e2e_base._PROJECT,
117+
location=e2e_base._LOCATION,
118+
experiment=self._experiment_name,
119+
)
120+
aiplatform.start_run(_RUN, resume=True)
95121
aiplatform.log_metrics(_METRICS)
96122
run = aiplatform.ExperimentRun(run_name=_RUN, experiment=self._experiment_name)
97123
assert run.get_metrics() == _METRICS
98124

99125
def test_log_time_series_metrics(self):
126+
aiplatform.init(
127+
project=e2e_base._PROJECT,
128+
location=e2e_base._LOCATION,
129+
experiment=self._experiment_name,
130+
)
131+
132+
aiplatform.start_run(_RUN, resume=True)
133+
100134
for i in range(5):
101135
aiplatform.log_time_series_metrics({_TIME_SERIES_METRIC_KEY: i})
102136

@@ -116,26 +150,41 @@ def test_create_artifact(self, shared_state):
116150
schema_title="system.Dataset",
117151
resource_id=self._dataset_artifact_name,
118152
uri=self._dataset_artifact_uri,
153+
project=e2e_base._PROJECT,
154+
location=e2e_base._LOCATION,
119155
)
120156

121157
shared_state["resources"].append(ds)
122158
assert ds.uri == self._dataset_artifact_uri
123159

124160
def test_get_artifact_by_uri(self):
125-
ds = aiplatform.Artifact.get_with_uri(uri=self._dataset_artifact_uri)
161+
ds = aiplatform.Artifact.get_with_uri(
162+
uri=self._dataset_artifact_uri,
163+
project=e2e_base._PROJECT,
164+
location=e2e_base._LOCATION,
165+
)
126166

127167
assert ds.uri == self._dataset_artifact_uri
128168
assert ds.name == self._dataset_artifact_name
129169

130170
def test_log_execution_and_artifact(self, shared_state):
171+
aiplatform.init(
172+
project=e2e_base._PROJECT,
173+
location=e2e_base._LOCATION,
174+
experiment=self._experiment_name,
175+
)
176+
aiplatform.start_run(_RUN, resume=True)
177+
131178
with aiplatform.start_execution(
132179
schema_title="system.ContainerExecution",
133180
resource_id=self._make_display_name("execution"),
134181
) as execution:
135182

136183
shared_state["resources"].append(execution)
137184

138-
ds = aiplatform.Artifact(artifact_name=self._dataset_artifact_name)
185+
ds = aiplatform.Artifact(
186+
artifact_name=self._dataset_artifact_name,
187+
)
139188
execution.assign_input_artifacts([ds])
140189

141190
model = aiplatform.Artifact.create(schema_title="system.Model")
@@ -188,11 +237,22 @@ def test_log_execution_and_artifact(self, shared_state):
188237
)
189238

190239
def test_end_run(self):
240+
aiplatform.init(
241+
project=e2e_base._PROJECT,
242+
location=e2e_base._LOCATION,
243+
experiment=self._experiment_name,
244+
)
245+
aiplatform.start_run(_RUN, resume=True)
191246
aiplatform.end_run()
192247
run = aiplatform.ExperimentRun(run_name=_RUN, experiment=self._experiment_name)
193248
assert run.state == aiplatform.gapic.Execution.State.COMPLETE
194249

195250
def test_run_context_manager(self):
251+
aiplatform.init(
252+
project=e2e_base._PROJECT,
253+
location=e2e_base._LOCATION,
254+
experiment=self._experiment_name,
255+
)
196256
with aiplatform.start_run(_RUN_2) as run:
197257
run.log_params(_PARAMS_2)
198258
run.log_metrics(_METRICS_2)
@@ -226,15 +286,25 @@ def pipeline(learning_rate: float, dropout_rate: float):
226286
job_id=self._pipeline_job_id,
227287
pipeline_root=f'gs://{shared_state["staging_bucket_name"]}',
228288
parameter_values={"learning_rate": 0.1, "dropout_rate": 0.2},
289+
project=e2e_base._PROJECT,
290+
location=e2e_base._LOCATION,
229291
)
230292

231-
job.submit(experiment=self._experiment_name)
293+
job.submit(
294+
experiment=self._experiment_name,
295+
)
232296

233297
shared_state["resources"].append(job)
234298

235299
job.wait()
236300

237301
def test_get_experiments_df(self):
302+
aiplatform.init(
303+
project=e2e_base._PROJECT,
304+
location=e2e_base._LOCATION,
305+
experiment=self._experiment_name,
306+
)
307+
238308
df = aiplatform.get_experiment_df()
239309

240310
pipelines_param_and_metrics = {
@@ -264,8 +334,6 @@ def test_get_experiments_df(self):
264334
true_df_dict_2["run_type"] = aiplatform.metadata.constants.SYSTEM_EXPERIMENT_RUN
265335
true_df_dict_2[f"time_series_metric.{_TIME_SERIES_METRIC_KEY}"] = 0.0
266336

267-
# TODO(remove when running CI)
268-
269337
true_df_dict_3 = {
270338
"experiment_name": self._experiment_name,
271339
"run_name": self._pipeline_job_id,
@@ -292,14 +360,23 @@ def test_get_experiments_df(self):
292360
) == sorted(df.fillna(0.0).to_dict("records"), key=lambda d: d["run_name"])
293361

294362
def test_delete_run(self):
295-
run = aiplatform.ExperimentRun(run_name=_RUN, experiment=self._experiment_name)
363+
run = aiplatform.ExperimentRun(
364+
run_name=_RUN,
365+
experiment=self._experiment_name,
366+
project=e2e_base._PROJECT,
367+
location=e2e_base._LOCATION,
368+
)
296369
run.delete(delete_backing_tensorboard_run=True)
297370

298371
with pytest.raises(exceptions.NotFound):
299372
aiplatform.ExperimentRun(run_name=_RUN, experiment=self._experiment_name)
300373

301374
def test_delete_experiment(self):
302-
experiment = aiplatform.Experiment(experiment_name=self._experiment_name)
375+
experiment = aiplatform.Experiment(
376+
experiment_name=self._experiment_name,
377+
project=e2e_base._PROJECT,
378+
location=e2e_base._LOCATION,
379+
)
303380
experiment.delete(delete_backing_tensorboard_runs=True)
304381

305382
with pytest.raises(exceptions.NotFound):

0 commit comments

Comments (0)