This repository was archived by the owner on Nov 29, 2023. It is now read-only.

Commit 59b00aa

feat: add optional parameters (tarball_gcs_dir, diagnosis_interval, jobs, yarn_application_ids) in DiagnoseClusterRequest (#560)
* feat: add optional parameters (tarball_gcs_dir, diagnosis_interval, jobs, yarn_application_ids) in DiagnoseClusterRequest

  PiperOrigin-RevId: 565501215
  Source-Link: googleapis/googleapis@6b95655
  Source-Link: https://github.com/googleapis/googleapis-gen/commit/caf4b28f7522ef21bcc006bcb8c9b6cd6d2231c4
  Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiY2FmNGIyOGY3NTIyZWYyMWJjYzAwNmJjYjhjOWI2Y2Q2ZDIyMzFjNCJ9

* 🦉 Updates from OwlBot post-processor

  See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md

---------

Co-authored-by: Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com>
1 parent cbae958 commit 59b00aa

File tree

4 files changed (+53, -24 lines)

google/cloud/dataproc_v1/types/clusters.py

Lines changed: 50 additions & 22 deletions

@@ -20,6 +20,7 @@
 from google.protobuf import duration_pb2  # type: ignore
 from google.protobuf import field_mask_pb2  # type: ignore
 from google.protobuf import timestamp_pb2  # type: ignore
+from google.type import interval_pb2  # type: ignore
 import proto  # type: ignore

 from google.cloud.dataproc_v1.types import shared
@@ -837,26 +838,20 @@ class InstanceGroupConfig(proto.Message):
             Instance Group. See `Dataproc -> Minimum CPU
             Platform <https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu>`__.
         min_num_instances (int):
-            Optional. The minimum number of instances to create. If
-            min_num_instances is set, min_num_instances is used for a
-            criteria to decide the cluster. Cluster creation will be
-            failed by being an error state if the total number of
-            instances created is less than the min_num_instances. For
-            example, given that num_instances = 5 and min_num_instances
-            = 3,
-
-            - if 4 instances are created and then registered
-              successfully but one instance is failed, the failed VM
-              will be deleted and the cluster will be resized to 4
-              instances in running state.
-            - if 2 instances are created successfully and 3 instances
-              are failed, the cluster will be in an error state and
-              does not delete failed VMs for debugging.
-            - if 2 instance are created and then registered
-              successfully but 3 instances are failed to initialize,
-              the cluster will be in an error state and does not delete
-              failed VMs for debugging. NB: This can only be set for
-              primary workers now.
+            Optional. The minimum number of primary worker instances to
+            create. If ``min_num_instances`` is set, cluster creation
+            will succeed if the number of primary workers created is at
+            least equal to the ``min_num_instances`` number.
+
+            Example: Cluster creation request with ``num_instances`` =
+            ``5`` and ``min_num_instances`` = ``3``:
+
+            - If 4 VMs are created and 1 instance fails, the failed VM
+              is deleted. The cluster is resized to 4 instances and
+              placed in a ``RUNNING`` state.
+            - If 2 instances are created and 3 instances fail, the
+              cluster is placed in an ``ERROR`` state. The failed VMs
+              are not deleted.
         instance_flexibility_policy (google.cloud.dataproc_v1.types.InstanceFlexibilityPolicy):
             Optional. Instance flexibility Policy
             allowing a mixture of VM shapes and provisioning
@@ -1251,13 +1246,13 @@ class NodeGroup(proto.Message):
     """

     class Role(proto.Enum):
-        r"""Node group roles.
+        r"""Node pool roles.

         Values:
             ROLE_UNSPECIFIED (0):
                 Required unspecified role.
             DRIVER (1):
-                Job drivers run on the node group.
+                Job drivers run on the node pool.
         """
         ROLE_UNSPECIFIED = 0
         DRIVER = 1
@@ -2359,6 +2354,22 @@ class DiagnoseClusterRequest(proto.Message):
             handle the request.
         cluster_name (str):
             Required. The cluster name.
+        tarball_gcs_dir (str):
+            Optional. The output Cloud Storage directory
+            for the diagnostic tarball. If not specified, a
+            task-specific directory in the cluster's staging
+            bucket will be used.
+        diagnosis_interval (google.type.interval_pb2.Interval):
+            Optional. Time interval in which diagnosis
+            should be carried out on the cluster.
+        jobs (MutableSequence[str]):
+            Optional. Specifies a list of jobs on which
+            diagnosis is to be performed. Format:
+            projects/{project}/regions/{region}/jobs/{job}
+        yarn_application_ids (MutableSequence[str]):
+            Optional. Specifies a list of yarn
+            applications on which diagnosis is to be
+            performed.
     """

     project_id: str = proto.Field(
@@ -2373,6 +2384,23 @@ class DiagnoseClusterRequest(proto.Message):
         proto.STRING,
         number=2,
     )
+    tarball_gcs_dir: str = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+    diagnosis_interval: interval_pb2.Interval = proto.Field(
+        proto.MESSAGE,
+        number=6,
+        message=interval_pb2.Interval,
+    )
+    jobs: MutableSequence[str] = proto.RepeatedField(
+        proto.STRING,
+        number=10,
+    )
+    yarn_application_ids: MutableSequence[str] = proto.RepeatedField(
+        proto.STRING,
+        number=11,
+    )


 class DiagnoseClusterResults(proto.Message):
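For context, a minimal usage sketch of the new request fields (not part of this commit): it builds a DiagnoseClusterRequest with the four new optional parameters and waits on the returned long-running operation. The project, region, bucket, job ID, YARN application ID, and timestamp values are placeholders, and the regional-endpoint setup is a common convention rather than something this diff prescribes.

# Hedged sketch exercising the new optional DiagnoseClusterRequest fields.
# All resource names and timestamps below are placeholders.
from google.cloud import dataproc_v1
from google.protobuf import timestamp_pb2
from google.type import interval_pb2


def diagnose_cluster(project_id: str, region: str, cluster_name: str) -> str:
    # Use the regional Dataproc endpoint for the cluster's region.
    client = dataproc_v1.ClusterControllerClient(
        client_options={"api_endpoint": f"{region}-dataproc.googleapis.com:443"}
    )

    # Limit diagnosis to a specific time window (google.type.Interval).
    window = interval_pb2.Interval(
        start_time=timestamp_pb2.Timestamp(seconds=1_694_000_000),
        end_time=timestamp_pb2.Timestamp(seconds=1_694_086_400),
    )

    request = dataproc_v1.DiagnoseClusterRequest(
        project_id=project_id,
        region=region,
        cluster_name=cluster_name,
        tarball_gcs_dir="gs://example-bucket/diagnostics/",  # optional output directory
        diagnosis_interval=window,
        jobs=[f"projects/{project_id}/regions/{region}/jobs/example-job-id"],
        yarn_application_ids=["application_1694000000000_0001"],
    )

    # diagnose_cluster returns a long-running operation; its result is
    # DiagnoseClusterResults, whose output_uri points at the tarball.
    operation = client.diagnose_cluster(request=request)
    return operation.result().output_uri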

samples/generated_samples/snippet_metadata_google.cloud.dataproc.v1.json

Lines changed: 1 addition & 1 deletion

@@ -8,7 +8,7 @@
     ],
     "language": "PYTHON",
     "name": "google-cloud-dataproc",
-    "version": "5.5.1"
+    "version": "0.1.0"
   },
   "snippets": [
     {

scripts/fixup_dataproc_v1_keywords.py

Lines changed: 1 addition & 1 deletion

@@ -50,7 +50,7 @@ class dataprocCallTransformer(cst.CSTTransformer):
         'delete_cluster': ('project_id', 'region', 'cluster_name', 'cluster_uuid', 'request_id', ),
         'delete_job': ('project_id', 'region', 'job_id', ),
         'delete_workflow_template': ('name', 'version', ),
-        'diagnose_cluster': ('project_id', 'region', 'cluster_name', ),
+        'diagnose_cluster': ('project_id', 'region', 'cluster_name', 'tarball_gcs_dir', 'diagnosis_interval', 'jobs', 'yarn_application_ids', ),
         'get_autoscaling_policy': ('name', ),
         'get_batch': ('name', ),
         'get_cluster': ('project_id', 'region', 'cluster_name', ),
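For reference, this keyword map drives the library's libcst-based fixup script, which rewrites pre-2.0-style flattened keyword calls into the request-dict form. With the updated entry, a call that uses the new keywords would be rewritten roughly as in the sketch below (placeholder values; this is an illustration of the transformation, not output captured from the script):

# Before fixup (old-style flattened keywords, placeholder values):
client.diagnose_cluster(
    project_id="example-project",
    region="us-central1",
    cluster_name="example-cluster",
    tarball_gcs_dir="gs://example-bucket/diagnostics/",
)

# After fixup (keywords gathered into a single request dict):
client.diagnose_cluster(
    request={
        "project_id": "example-project",
        "region": "us-central1",
        "cluster_name": "example-cluster",
        "tarball_gcs_dir": "gs://example-bucket/diagnostics/",
    }
)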

tests/unit/gapic/dataproc_v1/test_cluster_controller.py

Lines changed: 1 addition & 0 deletions

@@ -51,6 +51,7 @@
 from google.protobuf import field_mask_pb2  # type: ignore
 from google.protobuf import json_format
 from google.protobuf import timestamp_pb2  # type: ignore
+from google.type import interval_pb2  # type: ignore
 import grpc
 from grpc.experimental import aio
 from proto.marshal.rules import wrappers

0 commit comments
