diff --git a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/autoscaling_policies.proto b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/autoscaling_policies.proto index 2bd5b7bbb1e..4ce5868d0f7 100644 --- a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/autoscaling_policies.proto +++ b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/autoscaling_policies.proto @@ -36,12 +36,10 @@ option (google.api.resource_definition) = { // Dataproc API. service AutoscalingPolicyService { option (google.api.default_host) = "dataproc.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform"; + option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; // Creates new autoscaling policy. - rpc CreateAutoscalingPolicy(CreateAutoscalingPolicyRequest) - returns (AutoscalingPolicy) { + rpc CreateAutoscalingPolicy(CreateAutoscalingPolicyRequest) returns (AutoscalingPolicy) { option (google.api.http) = { post: "/v1/{parent=projects/*/locations/*}/autoscalingPolicies" body: "policy" @@ -57,8 +55,7 @@ service AutoscalingPolicyService { // // Disabled check for update_mask, because all updates will be full // replacements. - rpc UpdateAutoscalingPolicy(UpdateAutoscalingPolicyRequest) - returns (AutoscalingPolicy) { + rpc UpdateAutoscalingPolicy(UpdateAutoscalingPolicyRequest) returns (AutoscalingPolicy) { option (google.api.http) = { put: "/v1/{policy.name=projects/*/locations/*/autoscalingPolicies/*}" body: "policy" @@ -71,8 +68,7 @@ service AutoscalingPolicyService { } // Retrieves autoscaling policy. - rpc GetAutoscalingPolicy(GetAutoscalingPolicyRequest) - returns (AutoscalingPolicy) { + rpc GetAutoscalingPolicy(GetAutoscalingPolicyRequest) returns (AutoscalingPolicy) { option (google.api.http) = { get: "/v1/{name=projects/*/locations/*/autoscalingPolicies/*}" additional_bindings { @@ -83,8 +79,7 @@ service AutoscalingPolicyService { } // Lists autoscaling policies in the project. - rpc ListAutoscalingPolicies(ListAutoscalingPoliciesRequest) - returns (ListAutoscalingPoliciesResponse) { + rpc ListAutoscalingPolicies(ListAutoscalingPoliciesRequest) returns (ListAutoscalingPoliciesResponse) { option (google.api.http) = { get: "/v1/{parent=projects/*/locations/*}/autoscalingPolicies" additional_bindings { @@ -96,8 +91,7 @@ service AutoscalingPolicyService { // Deletes an autoscaling policy. It is an error to delete an autoscaling // policy that is in use by one or more clusters. - rpc DeleteAutoscalingPolicy(DeleteAutoscalingPolicyRequest) - returns (google.protobuf.Empty) { + rpc DeleteAutoscalingPolicy(DeleteAutoscalingPolicyRequest) returns (google.protobuf.Empty) { option (google.api.http) = { delete: "/v1/{name=projects/*/locations/*/autoscalingPolicies/*}" additional_bindings { @@ -138,31 +132,26 @@ message AutoscalingPolicy { // Autoscaling algorithm for policy. oneof algorithm { - BasicAutoscalingAlgorithm basic_algorithm = 3 - [(google.api.field_behavior) = REQUIRED]; + BasicAutoscalingAlgorithm basic_algorithm = 3 [(google.api.field_behavior) = REQUIRED]; } // Required. Describes how the autoscaler will operate for primary workers. - InstanceGroupAutoscalingPolicyConfig worker_config = 4 - [(google.api.field_behavior) = REQUIRED]; + InstanceGroupAutoscalingPolicyConfig worker_config = 4 [(google.api.field_behavior) = REQUIRED]; // Optional. Describes how the autoscaler will operate for secondary workers. 
- InstanceGroupAutoscalingPolicyConfig secondary_worker_config = 5 - [(google.api.field_behavior) = OPTIONAL]; + InstanceGroupAutoscalingPolicyConfig secondary_worker_config = 5 [(google.api.field_behavior) = OPTIONAL]; } // Basic algorithm for autoscaling. message BasicAutoscalingAlgorithm { // Required. YARN autoscaling configuration. - BasicYarnAutoscalingConfig yarn_config = 1 - [(google.api.field_behavior) = REQUIRED]; + BasicYarnAutoscalingConfig yarn_config = 1 [(google.api.field_behavior) = REQUIRED]; // Optional. Duration between scaling events. A scaling period starts after // the update operation from the previous event has completed. // // Bounds: [2m, 1d]. Default: 2m. - google.protobuf.Duration cooldown_period = 2 - [(google.api.field_behavior) = OPTIONAL]; + google.protobuf.Duration cooldown_period = 2 [(google.api.field_behavior) = OPTIONAL]; } // Basic autoscaling configurations for YARN. @@ -173,8 +162,7 @@ message BasicYarnAutoscalingConfig { // downscaling operations. // // Bounds: [0s, 1d]. - google.protobuf.Duration graceful_decommission_timeout = 5 - [(google.api.field_behavior) = REQUIRED]; + google.protobuf.Duration graceful_decommission_timeout = 5 [(google.api.field_behavior) = REQUIRED]; // Required. Fraction of average pending memory in the last cooldown period // for which to add workers. A scale-up factor of 1.0 will result in scaling @@ -201,8 +189,7 @@ message BasicYarnAutoscalingConfig { // on any recommended change. // // Bounds: [0.0, 1.0]. Default: 0.0. - double scale_up_min_worker_fraction = 3 - [(google.api.field_behavior) = OPTIONAL]; + double scale_up_min_worker_fraction = 3 [(google.api.field_behavior) = OPTIONAL]; // Optional. Minimum scale-down threshold as a fraction of total cluster size // before scaling occurs. For example, in a 20-worker cluster, a threshold of @@ -211,8 +198,7 @@ message BasicYarnAutoscalingConfig { // on any recommended change. // // Bounds: [0.0, 1.0]. Default: 0.0. - double scale_down_min_worker_fraction = 4 - [(google.api.field_behavior) = OPTIONAL]; + double scale_down_min_worker_fraction = 4 [(google.api.field_behavior) = OPTIONAL]; } // Configuration for the size bounds of an instance group, including its @@ -272,10 +258,8 @@ message CreateAutoscalingPolicyRequest { } ]; - // The autoscaling policy to create. - AutoscalingPolicy policy = 2 [ - (google.api.field_behavior) = REQUIRED - ]; + // Required. The autoscaling policy to create. + AutoscalingPolicy policy = 2 [(google.api.field_behavior) = REQUIRED]; } // A request to fetch an autoscaling policy. @@ -357,8 +341,7 @@ message ListAutoscalingPoliciesRequest { // A response to a request to list autoscaling policies in a project. message ListAutoscalingPoliciesResponse { // Output only. Autoscaling policies list. - repeated AutoscalingPolicy policies = 1 - [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated AutoscalingPolicy policies = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. This token is included in the response if there are more // results to fetch. 
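Note: the generated Node.js client exposes the v1 AutoscalingPolicyService surface above directly. A minimal TypeScript sketch of CreateAutoscalingPolicy follows, assuming the published @google-cloud/dataproc package exports v1.AutoscalingPolicyServiceClient as in this repo; the project, region, and policy id values are placeholders, and field names mirror the proto (basic_algorithm, worker_config, cooldown_period) in camelCase.

import {v1} from '@google-cloud/dataproc';

async function createPolicy(projectId: string, region: string) {
  const client = new v1.AutoscalingPolicyServiceClient();
  // `policy` is now annotated REQUIRED on CreateAutoscalingPolicyRequest.
  const [policy] = await client.createAutoscalingPolicy({
    // Regional parent; `projects/{project}/locations/{location}` is also accepted.
    parent: `projects/${projectId}/regions/${region}`,
    policy: {
      id: 'scale-on-yarn-memory', // hypothetical policy id
      basicAlgorithm: {
        yarnConfig: {
          gracefulDecommissionTimeout: {seconds: 600}, // Bounds: [0s, 1d].
          scaleUpFactor: 0.5,
          scaleDownFactor: 0.5,
        },
        cooldownPeriod: {seconds: 120}, // Bounds: [2m, 1d]. Default: 2m.
      },
      workerConfig: {minInstances: 2, maxInstances: 10},
    },
  });
  console.log(`Created autoscaling policy: ${policy.name}`);
}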
diff --git a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/clusters.proto b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/clusters.proto index a20b8f31e8e..fbaf9391ed0 100644 --- a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/clusters.proto +++ b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/clusters.proto @@ -88,7 +88,7 @@ service ClusterController { option (google.api.method_signature) = "project_id,region,cluster_name"; } - // Lists all regions/{region}/clusters in a project. + // Lists all regions/{region}/clusters in a project alphabetically. rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { option (google.api.http) = { get: "/v1/projects/{project_id}/regions/{region}/clusters" @@ -150,11 +150,11 @@ message Cluster { // generates this value when it creates the cluster. string cluster_uuid = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; - // Contains cluster daemon metrics such as HDFS and YARN stats. + // Output only. Contains cluster daemon metrics such as HDFS and YARN stats. // // **Beta Feature**: This report is available for testing purposes only. It // may be changed before final release. - ClusterMetrics metrics = 9; + ClusterMetrics metrics = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; } // The cluster config. @@ -167,7 +167,7 @@ message ClusterConfig { // Compute Engine zone where your cluster is deployed, and then create // and manage this project-level, per-location bucket (see // [Dataproc staging - // bucket](/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + // bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). string config_bucket = 1 [(google.api.field_behavior) = OPTIONAL]; // Optional. The shared Compute Engine config settings for @@ -259,7 +259,8 @@ message GceClusterConfig { // communications. Cannot be specified with subnetwork_uri. If neither // `network_uri` nor `subnetwork_uri` is specified, the "default" network of // the project is used, if it exists. Cannot be a "Custom Subnet Network" (see - // [Using Subnetworks](/compute/docs/subnetworks) for more information). + // [Using Subnetworks](https://cloud.google.com/compute/docs/subnetworks) for + // more information). // // A full URL, partial URI, or short name are valid. Examples: // @@ -287,15 +288,15 @@ message GceClusterConfig { bool internal_ip_only = 7 [(google.api.field_behavior) = OPTIONAL]; // Optional. The [Dataproc service - // account](/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_cloud_dataproc) + // account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_cloud_dataproc) // (also see [VM Data Plane - // identity](/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) + // identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) // used by Dataproc cluster VM instances to access Google Cloud Platform // services. // // If not specified, the // [Compute Engine default service - // account](/compute/docs/access/service-accounts#default_service_account) + // account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) // is used. 
string service_account = 8 [(google.api.field_behavior) = OPTIONAL]; @@ -315,8 +316,8 @@ message GceClusterConfig { // * https://www.googleapis.com/auth/devstorage.full_control repeated string service_account_scopes = 3 [(google.api.field_behavior) = OPTIONAL]; - // The Compute Engine tags to add to all instances (see - // [Tagging instances](/compute/docs/label-or-tag-resources#tags)). + // The Compute Engine tags to add to all instances (see [Tagging + // instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). repeated string tags = 4; // The Compute Engine metadata entries to add to all instances (see @@ -339,9 +340,24 @@ message InstanceGroupConfig { // from `cluster_name`, `num_instances`, and the instance group. repeated string instance_names = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; - // Optional. The Compute Engine image resource used for cluster - // instances. It can be specified or may be inferred from - // `SoftwareConfig.image_version`. + // Optional. The Compute Engine image resource used for cluster instances. + // + // The URI can represent an image or image family. + // + // Image examples: + // + // * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` + // * `projects/[project_id]/global/images/[image-id]` + // * `image-id` + // + // Image family examples. Dataproc will use the most recent + // image from the family: + // + // * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` + // * `projects/[project_id]/global/images/family/[custom-image-family-name]` + // + // If the URI is unspecified, it will be inferred from + // `SoftwareConfig.image_version` or the system default. string image_uri = 3 [(google.api.field_behavior) = OPTIONAL]; // Optional. The Compute Engine machine type used for cluster instances. @@ -354,7 +370,7 @@ message InstanceGroupConfig { // // **Auto Zone Exception**: If you are using the Dataproc // [Auto Zone - // Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + // Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) // feature, you must use the short name of the machine type // resource, for example, `n1-standard-2`. string machine_type_uri = 4 [(google.api.field_behavior) = OPTIONAL]; @@ -362,9 +378,9 @@ message InstanceGroupConfig { // Optional. Disk option config settings. DiskConfig disk_config = 5 [(google.api.field_behavior) = OPTIONAL]; - // Optional. Specifies that this instance group contains preemptible + // Output only. Specifies that this instance group contains preemptible // instances. - bool is_preemptible = 6 [(google.api.field_behavior) = OPTIONAL]; + bool is_preemptible = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The config for Compute Engine Instance Group // Manager that manages this group. @@ -376,8 +392,8 @@ message InstanceGroupConfig { repeated AcceleratorConfig accelerators = 8 [(google.api.field_behavior) = OPTIONAL]; // Optional. Specifies the minimum cpu platform for the Instance Group. - // See [Dataproc→Minimum CPU Platform] - // (/dataproc/docs/concepts/compute/dataproc-min-cpu). + // See [Dataproc -> Minimum CPU + // Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). 
string min_cpu_platform = 9 [(google.api.field_behavior) = OPTIONAL]; } @@ -392,12 +408,13 @@ message ManagedGroupConfig { } // Specifies the type and number of accelerator cards attached to the instances -// of an instance. See [GPUs on Compute Engine](/compute/docs/gpus/). +// of an instance. See [GPUs on Compute +// Engine](https://cloud.google.com/compute/docs/gpus/). message AcceleratorConfig { // Full URL, partial URI, or short name of the accelerator type resource to // expose to this instance. See // [Compute Engine - // AcceleratorTypes](/compute/docs/reference/beta/acceleratorTypes). + // AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). // // Examples: // @@ -407,7 +424,7 @@ message AcceleratorConfig { // // **Auto Zone Exception**: If you are using the Dataproc // [Auto Zone - // Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + // Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) // feature, you must use the short name of the accelerator type // resource, for example, `nvidia-tesla-k80`. string accelerator_type_uri = 1; @@ -519,7 +536,8 @@ message SecurityConfig { // Specifies Kerberos related configuration. message KerberosConfig { - // Optional. Flag to indicate whether to Kerberize the cluster. + // Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set + // this field to true to enable Kerberos on a cluster. bool enable_kerberos = 1 [(google.api.field_behavior) = OPTIONAL]; // Required. The Cloud Storage URI of a KMS encrypted file containing the root @@ -590,10 +608,10 @@ message KerberosConfig { message SoftwareConfig { // Optional. The version of software inside the cluster. It must be one of the // supported [Dataproc - // Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions), + // Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions), // such as "1.2" (including a subminor version, such as "1.2.29"), or the // ["preview" - // version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). + // version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). // If unspecified, it defaults to the latest Debian version. string image_version = 1 [(google.api.field_behavior) = OPTIONAL]; @@ -613,8 +631,8 @@ message SoftwareConfig { // * spark: `spark-defaults.conf` // * yarn: `yarn-site.xml` // - // For more information, see - // [Cluster properties](/dataproc/docs/concepts/cluster-properties). + // For more information, see [Cluster + // properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties). map<string, string> properties = 2 [(google.api.field_behavior) = OPTIONAL]; // Optional. The set of components to activate on the cluster. diff --git a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/jobs.proto b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/jobs.proto index 85921dc4c88..0379c09dd57 100644 --- a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/jobs.proto +++ b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/jobs.proto @@ -43,6 +43,19 @@ service JobController { option (google.api.method_signature) = "project_id,region,job"; } + // Submits job to a cluster. 
+ rpc SubmitJobAsOperation(SubmitJobRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/projects/{project_id}/regions/{region}/jobs:submitAsOperation" + body: "*" + }; + option (google.api.method_signature) = "project_id, region, job"; + option (google.longrunning.operation_info) = { + response_type: "Job" + metadata_type: "JobMetadata" + }; + } + // Gets the resource representation for a job in a project. rpc GetJob(GetJobRequest) returns (Job) { option (google.api.http) = { @@ -420,7 +433,10 @@ message SparkRJob { LoggingConfig logging_config = 6 [(google.api.field_behavior) = OPTIONAL]; } -// A Dataproc job for running [Presto](https://prestosql.io/) queries +// A Dataproc job for running [Presto](https://prestosql.io/) queries. +// **IMPORTANT**: The [Dataproc Presto Optional +// Component](/dataproc/docs/concepts/components/presto) must be enabled when +// the cluster is created to submit a Presto job to the cluster. message PrestoJob { // Required. The sequence of Presto queries to execute, specified as // either an HCFS file URI or as a list of queries. @@ -692,6 +708,12 @@ message Job { // over time. This is in contrast to a user-settable reference.job_id that // may be reused over time. string job_uuid = 22 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Indicates whether the job is completed. If the value is `false`, + // the job is still in progress. If `true`, the job is completed, and + // `status.state` field will indicate if it was successful, failed, + // or cancelled. + bool done = 24 [(google.api.field_behavior) = OUTPUT_ONLY]; } // Job scheduling options. @@ -733,6 +755,21 @@ message SubmitJobRequest { string request_id = 4 [(google.api.field_behavior) = OPTIONAL]; } +// Job Operation metadata. +message JobMetadata { + // Output only. The job id. + string job_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Most recent job status. + JobStatus status = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Operation type. + string operation_type = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Job submission time. + google.protobuf.Timestamp start_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + // A request to get the resource representation for a job in a project. message GetJobRequest { // Required. The ID of the Google Cloud Platform project that the job diff --git a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/shared.proto b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/shared.proto index c6ff8f2869a..99d6e77602b 100644 --- a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/shared.proto +++ b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/shared.proto @@ -37,6 +37,12 @@ enum Component { // The Jupyter Notebook. JUPYTER = 1; + // The Presto query engine. + PRESTO = 6; + // The Zeppelin notebook. ZEPPELIN = 4; + + // The Zookeeper service. 
+ ZOOKEEPER = 8; } diff --git a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/workflow_templates.proto b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/workflow_templates.proto index 2db557988e3..d1cfcc09f43 100644 --- a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/workflow_templates.proto +++ b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1/workflow_templates.proto @@ -329,7 +329,13 @@ message OrderedJob { PigJob pig_job = 6; + // Spark R job + SparkRJob spark_r_job = 11; + SparkSqlJob spark_sql_job = 7; + + // Presto job + PrestoJob presto_job = 12; } // Optional. The labels to associate with this job. diff --git a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1beta2/autoscaling_policies.proto b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1beta2/autoscaling_policies.proto index a5a7d56eba8..a7d6376be89 100644 --- a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1beta2/autoscaling_policies.proto +++ b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1beta2/autoscaling_policies.proto @@ -36,12 +36,10 @@ option (google.api.resource_definition) = { // Cloud Dataproc API. service AutoscalingPolicyService { option (google.api.default_host) = "dataproc.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform"; + option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; // Creates new autoscaling policy. - rpc CreateAutoscalingPolicy(CreateAutoscalingPolicyRequest) - returns (AutoscalingPolicy) { + rpc CreateAutoscalingPolicy(CreateAutoscalingPolicyRequest) returns (AutoscalingPolicy) { option (google.api.http) = { post: "/v1beta2/{parent=projects/*/locations/*}/autoscalingPolicies" body: "policy" @@ -57,8 +55,7 @@ service AutoscalingPolicyService { // // Disabled check for update_mask, because all updates will be full // replacements. - rpc UpdateAutoscalingPolicy(UpdateAutoscalingPolicyRequest) - returns (AutoscalingPolicy) { + rpc UpdateAutoscalingPolicy(UpdateAutoscalingPolicyRequest) returns (AutoscalingPolicy) { option (google.api.http) = { put: "/v1beta2/{policy.name=projects/*/locations/*/autoscalingPolicies/*}" body: "policy" @@ -71,8 +68,7 @@ service AutoscalingPolicyService { } // Retrieves autoscaling policy. - rpc GetAutoscalingPolicy(GetAutoscalingPolicyRequest) - returns (AutoscalingPolicy) { + rpc GetAutoscalingPolicy(GetAutoscalingPolicyRequest) returns (AutoscalingPolicy) { option (google.api.http) = { get: "/v1beta2/{name=projects/*/locations/*/autoscalingPolicies/*}" additional_bindings { @@ -83,8 +79,7 @@ service AutoscalingPolicyService { } // Lists autoscaling policies in the project. - rpc ListAutoscalingPolicies(ListAutoscalingPoliciesRequest) - returns (ListAutoscalingPoliciesResponse) { + rpc ListAutoscalingPolicies(ListAutoscalingPoliciesRequest) returns (ListAutoscalingPoliciesResponse) { option (google.api.http) = { get: "/v1beta2/{parent=projects/*/locations/*}/autoscalingPolicies" additional_bindings { @@ -96,8 +91,7 @@ service AutoscalingPolicyService { // Deletes an autoscaling policy. It is an error to delete an autoscaling // policy that is in use by one or more clusters. 
- rpc DeleteAutoscalingPolicy(DeleteAutoscalingPolicyRequest) - returns (google.protobuf.Empty) { + rpc DeleteAutoscalingPolicy(DeleteAutoscalingPolicyRequest) returns (google.protobuf.Empty) { option (google.api.http) = { delete: "/v1beta2/{name=projects/*/locations/*/autoscalingPolicies/*}" additional_bindings { @@ -142,26 +136,22 @@ message AutoscalingPolicy { } // Required. Describes how the autoscaler will operate for primary workers. - InstanceGroupAutoscalingPolicyConfig worker_config = 4 - [(google.api.field_behavior) = REQUIRED]; + InstanceGroupAutoscalingPolicyConfig worker_config = 4 [(google.api.field_behavior) = REQUIRED]; // Optional. Describes how the autoscaler will operate for secondary workers. - InstanceGroupAutoscalingPolicyConfig secondary_worker_config = 5 - [(google.api.field_behavior) = OPTIONAL]; + InstanceGroupAutoscalingPolicyConfig secondary_worker_config = 5 [(google.api.field_behavior) = OPTIONAL]; } // Basic algorithm for autoscaling. message BasicAutoscalingAlgorithm { // Required. YARN autoscaling configuration. - BasicYarnAutoscalingConfig yarn_config = 1 - [(google.api.field_behavior) = REQUIRED]; + BasicYarnAutoscalingConfig yarn_config = 1 [(google.api.field_behavior) = REQUIRED]; // Optional. Duration between scaling events. A scaling period starts after // the update operation from the previous event has completed. // // Bounds: [2m, 1d]. Default: 2m. - google.protobuf.Duration cooldown_period = 2 - [(google.api.field_behavior) = OPTIONAL]; + google.protobuf.Duration cooldown_period = 2 [(google.api.field_behavior) = OPTIONAL]; } // Basic autoscaling configurations for YARN. @@ -172,8 +162,7 @@ message BasicYarnAutoscalingConfig { // downscaling operations. // // Bounds: [0s, 1d]. - google.protobuf.Duration graceful_decommission_timeout = 5 - [(google.api.field_behavior) = REQUIRED]; + google.protobuf.Duration graceful_decommission_timeout = 5 [(google.api.field_behavior) = REQUIRED]; // Required. Fraction of average pending memory in the last cooldown period // for which to add workers. A scale-up factor of 1.0 will result in scaling @@ -200,8 +189,7 @@ message BasicYarnAutoscalingConfig { // on any recommended change. // // Bounds: [0.0, 1.0]. Default: 0.0. - double scale_up_min_worker_fraction = 3 - [(google.api.field_behavior) = OPTIONAL]; + double scale_up_min_worker_fraction = 3 [(google.api.field_behavior) = OPTIONAL]; // Optional. Minimum scale-down threshold as a fraction of total cluster size // before scaling occurs. For example, in a 20-worker cluster, a threshold of @@ -210,8 +198,7 @@ message BasicYarnAutoscalingConfig { // on any recommended change. // // Bounds: [0.0, 1.0]. Default: 0.0. - double scale_down_min_worker_fraction = 4 - [(google.api.field_behavior) = OPTIONAL]; + double scale_down_min_worker_fraction = 4 [(google.api.field_behavior) = OPTIONAL]; } // Configuration for the size bounds of an instance group, including its @@ -354,8 +341,7 @@ message ListAutoscalingPoliciesRequest { // A response to a request to list autoscaling policies in a project. message ListAutoscalingPoliciesResponse { // Output only. Autoscaling policies list. - repeated AutoscalingPolicy policies = 1 - [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated AutoscalingPolicy policies = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. This token is included in the response if there are more // results to fetch. 
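Note: the new SubmitJobAsOperation RPC (added to v1 jobs.proto above and to v1beta2 jobs.proto below) wraps job submission in a google.longrunning.Operation whose metadata is the new JobMetadata message, and the new Job.done field mirrors operation completion. A minimal TypeScript sketch of driving it to completion, assuming the generated Node.js client surfaces the RPC as submitJobAsOperation returning a gax long-running operation and accepts the standard apiEndpoint option; the project, region, cluster, and jar values are placeholders.

import {v1} from '@google-cloud/dataproc';

async function runSparkPi(projectId: string, region: string, clusterName: string) {
  // Dataproc job submission is served from the regional endpoint.
  const client = new v1.JobControllerClient({
    apiEndpoint: `${region}-dataproc.googleapis.com`,
  });
  const [operation] = await client.submitJobAsOperation({
    projectId,
    region,
    job: {
      placement: {clusterName},
      sparkJob: {
        mainClass: 'org.apache.spark.examples.SparkPi',
        jarFileUris: ['file:///usr/lib/spark/examples/jars/spark-examples.jar'],
        args: ['1000'],
      },
    },
  });
  // operation.metadata carries the new JobMetadata (job_id, status, operation_type, start_time).
  const [job] = await operation.promise();
  console.log(`done=${job.done}, state=${job.status?.state}`);
}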
diff --git a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1beta2/clusters.proto b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1beta2/clusters.proto index 2e9e648c5ac..93e6fe79e83 100644 --- a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1beta2/clusters.proto +++ b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1beta2/clusters.proto @@ -89,7 +89,7 @@ service ClusterController { option (google.api.method_signature) = "project_id, region, cluster_name"; } - // Lists all regions/{region}/clusters in a project. + // Lists all regions/{region}/clusters in a project alphabetically. rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { option (google.api.http) = { get: "/v1beta2/projects/{project_id}/regions/{region}/clusters" @@ -168,7 +168,7 @@ message ClusterConfig { // Compute Engine zone where your cluster is deployed, and then create // and manage this project-level, per-location bucket (see // [Dataproc staging - // bucket](/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + // bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). string config_bucket = 1 [(google.api.field_behavior) = OPTIONAL]; // Optional. The shared Compute Engine config settings for @@ -220,6 +220,33 @@ message ClusterConfig { // Optional. Security related configuration. SecurityConfig security_config = 18 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. + // Setting this is considered mutually exclusive with Compute Engine-based + // options such as `gce_cluster_config`, `master_config`, `worker_config`, + // `secondary_worker_config`, and `autoscaling_config`. + GkeClusterConfig gke_cluster_config = 19 [(google.api.field_behavior) = OPTIONAL]; +} + +// The GKE config for this cluster. +message GkeClusterConfig { + // A full, namespace-isolated deployment target for an existing GKE cluster. + message NamespacedGkeDeploymentTarget { + // Optional. The target GKE cluster to deploy to. + // Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}' + string target_gke_cluster = 1 [ + (google.api.field_behavior) = OPTIONAL, + (google.api.resource_reference) = { + type: "container.googleapis.com/Cluster" + } + ]; + + // Optional. A namespace within the GKE cluster to deploy into. + string cluster_namespace = 2 [(google.api.field_behavior) = OPTIONAL]; + } + + // Optional. A target for the deployment. + NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(google.api.field_behavior) = OPTIONAL]; } // Endpoint config for this cluster @@ -274,7 +301,8 @@ message GceClusterConfig { // communications. Cannot be specified with subnetwork_uri. If neither // `network_uri` nor `subnetwork_uri` is specified, the "default" network of // the project is used, if it exists. Cannot be a "Custom Subnet Network" (see - // [Using Subnetworks](/compute/docs/subnetworks) for more information). + // [Using Subnetworks](https://cloud.google.com/compute/docs/subnetworks) for + // more information). // // A full URL, partial URI, or short name are valid. Examples: // @@ -302,15 +330,15 @@ message GceClusterConfig { bool internal_ip_only = 7 [(google.api.field_behavior) = OPTIONAL]; // Optional. 
The [Dataproc service - // account](/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_cloud_dataproc) + // account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_cloud_dataproc) // (also see [VM Data Plane - // identity](/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) + // identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) // used by Dataproc cluster VM instances to access Google Cloud Platform // services. // // If not specified, the // [Compute Engine default service - // account](/compute/docs/access/service-accounts#default_service_account) + // account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) // is used. string service_account = 8 [(google.api.field_behavior) = OPTIONAL]; @@ -330,8 +358,8 @@ message GceClusterConfig { // * https://www.googleapis.com/auth/devstorage.full_control repeated string service_account_scopes = 3 [(google.api.field_behavior) = OPTIONAL]; - // The Compute Engine tags to add to all instances (see - // [Tagging instances](/compute/docs/label-or-tag-resources#tags)). + // The Compute Engine tags to add to all instances (see [Tagging + // instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). repeated string tags = 4; // The Compute Engine metadata entries to add to all instances (see @@ -354,9 +382,24 @@ message InstanceGroupConfig { // from `cluster_name`, `num_instances`, and the instance group. repeated string instance_names = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; - // Optional. The Compute Engine image resource used for cluster - // instances. It can be specified or may be inferred from - // `SoftwareConfig.image_version`. + // Optional. The Compute Engine image resource used for cluster instances. + // + // The URI can represent an image or image family. + // + // Image examples: + // + // * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` + // * `projects/[project_id]/global/images/[image-id]` + // * `image-id` + // + // Image family examples. Dataproc will use the most recent + // image from the family: + // + // * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` + // * `projects/[project_id]/global/images/family/[custom-image-family-name]` + // + // If the URI is unspecified, it will be inferred from + // `SoftwareConfig.image_version` or the system default. string image_uri = 3 [(google.api.field_behavior) = OPTIONAL]; // Optional. The Compute Engine machine type used for cluster instances. @@ -369,7 +412,7 @@ message InstanceGroupConfig { // // **Auto Zone Exception**: If you are using the Dataproc // [Auto Zone - // Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + // Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) // feature, you must use the short name of the machine type // resource, for example, `n1-standard-2`. string machine_type_uri = 4 [(google.api.field_behavior) = OPTIONAL]; @@ -377,9 +420,9 @@ message InstanceGroupConfig { // Optional. Disk option config settings. DiskConfig disk_config = 5 [(google.api.field_behavior) = OPTIONAL]; - // Optional. Specifies that this instance group contains preemptible + // Output only. 
Specifies that this instance group contains preemptible // instances. - bool is_preemptible = 6 [(google.api.field_behavior) = OPTIONAL]; + bool is_preemptible = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The config for Compute Engine Instance Group // Manager that manages this group. @@ -391,8 +434,8 @@ message InstanceGroupConfig { repeated AcceleratorConfig accelerators = 8 [(google.api.field_behavior) = OPTIONAL]; // Specifies the minimum cpu platform for the Instance Group. - // See [Dataproc→Minimum CPU Platform] - // (/dataproc/docs/concepts/compute/dataproc-min-cpu). + // See [Dataproc -> Minimum CPU + // Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). string min_cpu_platform = 9; } @@ -407,12 +450,13 @@ message ManagedGroupConfig { } // Specifies the type and number of accelerator cards attached to the instances -// of an instance group (see [GPUs on Compute Engine](/compute/docs/gpus/)). +// of an instance group (see [GPUs on Compute +// Engine](https://cloud.google.com/compute/docs/gpus/)). message AcceleratorConfig { // Full URL, partial URI, or short name of the accelerator type resource to // expose to this instance. See // [Compute Engine - // AcceleratorTypes](/compute/docs/reference/beta/acceleratorTypes) + // AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes) // // Examples // * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` @@ -421,7 +465,7 @@ message AcceleratorConfig { // // **Auto Zone Exception**: If you are using the Dataproc // [Auto Zone - // Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + // Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) // feature, you must use the short name of the accelerator type // resource, for example, `nvidia-tesla-k80`. string accelerator_type_uri = 1; @@ -487,7 +531,8 @@ message SecurityConfig { // Specifies Kerberos related configuration. message KerberosConfig { - // Optional. Flag to indicate whether to Kerberize the cluster. + // Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set + // this field to true to enable Kerberos on a cluster. bool enable_kerberos = 1 [(google.api.field_behavior) = OPTIONAL]; // Required. The Cloud Storage URI of a KMS encrypted file containing the root @@ -591,6 +636,15 @@ message ClusterStatus { // The cluster is being updated. It continues to accept and process jobs. UPDATING = 5; + + // The cluster is being stopped. It cannot be used. + STOPPING = 6; + + // The cluster is currently stopped. It is not ready for use. + STOPPED = 7; + + // The cluster is being started. It is not ready for use. + STARTING = 8; } // The cluster substate. @@ -631,10 +685,10 @@ message ClusterStatus { message SoftwareConfig { // Optional. The version of software inside the cluster. It must be one of the // supported [Dataproc - // Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions), + // Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions), // such as "1.2" (including a subminor version, such as "1.2.29"), or the // ["preview" - // version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). 
+ // version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). // If unspecified, it defaults to the latest Debian version. string image_version = 1 [(google.api.field_behavior) = OPTIONAL]; @@ -654,8 +708,8 @@ message SoftwareConfig { // * spark: `spark-defaults.conf` // * yarn: `yarn-site.xml` // - // For more information, see - // [Cluster properties](/dataproc/docs/concepts/cluster-properties). + // For more information, see [Cluster + // properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties). map<string, string> properties = 2 [(google.api.field_behavior) = OPTIONAL]; // The set of optional components to activate on the cluster. diff --git a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1beta2/jobs.proto b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1beta2/jobs.proto index 3208822fc11..325dc9e57df 100644 --- a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1beta2/jobs.proto +++ b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1beta2/jobs.proto @@ -43,6 +43,19 @@ service JobController { option (google.api.method_signature) = "project_id, region, job"; } + // Submits job to a cluster. + rpc SubmitJobAsOperation(SubmitJobRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1beta2/projects/{project_id}/regions/{region}/jobs:submitAsOperation" + body: "*" + }; + option (google.api.method_signature) = "project_id, region, job"; + option (google.longrunning.operation_info) = { + response_type: "Job" + metadata_type: "JobMetadata" + }; + } + // Gets the resource representation for a job in a project. rpc GetJob(GetJobRequest) returns (Job) { option (google.api.http) = { @@ -420,6 +433,42 @@ message SparkRJob { LoggingConfig logging_config = 6 [(google.api.field_behavior) = OPTIONAL]; } +// A Dataproc job for running [Presto](https://prestosql.io/) queries. +// **IMPORTANT**: The [Dataproc Presto Optional +// Component](/dataproc/docs/concepts/components/presto) must be enabled when +// the cluster is created to submit a Presto job to the cluster. +message PrestoJob { + // Required. The sequence of Presto queries to execute, specified as + // either an HCFS file URI or as a list of queries. + oneof queries { + // The HCFS URI of the script that contains SQL queries. + string query_file_uri = 1; + + // A list of queries. + QueryList query_list = 2; + } + + // Optional. Whether to continue executing queries if a query fails. + // The default value is `false`. Setting to `true` can be useful when + // executing independent parallel queries. + bool continue_on_failure = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The format in which query output will be displayed. See the + // Presto documentation for supported output formats + string output_format = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Presto client tags to attach to this query + repeated string client_tags = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A mapping of property names to values. Used to set Presto + // [session properties](https://prestodb.io/docs/current/sql/set-session.html) + // Equivalent to using the --session flag in the Presto CLI + map<string, string> properties = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The runtime log config for job execution. + LoggingConfig logging_config = 7 [(google.api.field_behavior) = OPTIONAL]; +} + // Dataproc job config. message JobPlacement { // Required. 
The name of the cluster where the job will be submitted. @@ -591,19 +640,29 @@ message Job { // Required. The application/framework-specific portion of the job. oneof type_job { - HadoopJob hadoop_job = 3; + // Optional. Job is a Hadoop job. + HadoopJob hadoop_job = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Job is a Spark job. + SparkJob spark_job = 4 [(google.api.field_behavior) = OPTIONAL]; - SparkJob spark_job = 4; + // Optional. Job is a PySpark job. + PySparkJob pyspark_job = 5 [(google.api.field_behavior) = OPTIONAL]; - PySparkJob pyspark_job = 5; + // Optional. Job is a Hive job. + HiveJob hive_job = 6 [(google.api.field_behavior) = OPTIONAL]; - HiveJob hive_job = 6; + // Optional. Job is a Pig job. + PigJob pig_job = 7 [(google.api.field_behavior) = OPTIONAL]; - PigJob pig_job = 7; + // Optional. Job is a SparkR job. + SparkRJob spark_r_job = 21 [(google.api.field_behavior) = OPTIONAL]; - SparkRJob spark_r_job = 21; + // Optional. Job is a SparkSql job. + SparkSqlJob spark_sql_job = 12 [(google.api.field_behavior) = OPTIONAL]; - SparkSqlJob spark_sql_job = 12; + // Optional. Job is a Presto job. + PrestoJob presto_job = 23 [(google.api.field_behavior) = OPTIONAL]; } // Output only. The job status. Additional application-specific @@ -649,6 +708,12 @@ message Job { // over time. This is in contrast to a user-settable reference.job_id that // may be reused over time. string job_uuid = 22 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Indicates whether the job is completed. If the value is `false`, + // the job is still in progress. If `true`, the job is completed, and + // `status.state` field will indicate if it was successful, failed, + // or cancelled. + bool done = 24 [(google.api.field_behavior) = OUTPUT_ONLY]; } // Job scheduling options. @@ -664,6 +729,21 @@ message JobScheduling { int32 max_failures_per_hour = 1 [(google.api.field_behavior) = OPTIONAL]; } +// Job Operation metadata. +message JobMetadata { + // Output only. The job id. + string job_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Most recent job status. + JobStatus status = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Operation type. + string operation_type = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Job submission time. + google.protobuf.Timestamp start_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + // A request to submit a job. message SubmitJobRequest { // Required. The ID of the Google Cloud Platform project that the job diff --git a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1beta2/shared.proto b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1beta2/shared.proto index eba80918754..130ae554d98 100644 --- a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1beta2/shared.proto +++ b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1beta2/shared.proto @@ -31,9 +31,15 @@ enum Component { // The Anaconda python distribution. ANACONDA = 5; + // Docker + DOCKER = 13; + // The Druid query engine. DRUID = 9; + // Flink + FLINK = 14; + // The Hive Web HCatalog (the REST service for accessing HCatalog). HIVE_WEBHCAT = 3; @@ -46,6 +52,12 @@ enum Component { // The Presto query engine. PRESTO = 6; + // The Ranger service. + RANGER = 12; + + // The Solr service. + SOLR = 10; + // The Zeppelin notebook. 
ZEPPELIN = 4; diff --git a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1beta2/workflow_templates.proto b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1beta2/workflow_templates.proto index a46282d9335..e5ef680bd4a 100644 --- a/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1beta2/workflow_templates.proto +++ b/packages/google-cloud-dataproc/protos/google/cloud/dataproc/v1beta2/workflow_templates.proto @@ -35,12 +35,10 @@ option java_package = "com.google.cloud.dataproc.v1beta2"; // Dataproc API. service WorkflowTemplateService { option (google.api.default_host) = "dataproc.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform"; + option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; // Creates new workflow template. - rpc CreateWorkflowTemplate(CreateWorkflowTemplateRequest) - returns (WorkflowTemplate) { + rpc CreateWorkflowTemplate(CreateWorkflowTemplateRequest) returns (WorkflowTemplate) { option (google.api.http) = { post: "/v1beta2/{parent=projects/*/regions/*}/workflowTemplates" body: "template" @@ -56,8 +54,7 @@ service WorkflowTemplateService { // // Can retrieve previously instantiated template by specifying optional // version parameter. - rpc GetWorkflowTemplate(GetWorkflowTemplateRequest) - returns (WorkflowTemplate) { + rpc GetWorkflowTemplate(GetWorkflowTemplateRequest) returns (WorkflowTemplate) { option (google.api.http) = { get: "/v1beta2/{name=projects/*/regions/*/workflowTemplates/*}" additional_bindings { @@ -87,8 +84,7 @@ service WorkflowTemplateService { // On successful completion, // [Operation.response][google.longrunning.Operation.response] will be // [Empty][google.protobuf.Empty]. - rpc InstantiateWorkflowTemplate(InstantiateWorkflowTemplateRequest) - returns (google.longrunning.Operation) { + rpc InstantiateWorkflowTemplate(InstantiateWorkflowTemplateRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1beta2/{name=projects/*/regions/*/workflowTemplates/*}:instantiate" body: "*" @@ -108,8 +104,7 @@ service WorkflowTemplateService { // Instantiates a template and begins execution. // // This method is equivalent to executing the sequence - // [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], - // [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate], + // [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate], // [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate]. // // The returned Operation can be used to track execution of @@ -130,9 +125,7 @@ service WorkflowTemplateService { // On successful completion, // [Operation.response][google.longrunning.Operation.response] will be // [Empty][google.protobuf.Empty]. - rpc InstantiateInlineWorkflowTemplate( - InstantiateInlineWorkflowTemplateRequest) - returns (google.longrunning.Operation) { + rpc InstantiateInlineWorkflowTemplate(InstantiateInlineWorkflowTemplateRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1beta2/{parent=projects/*/locations/*}/workflowTemplates:instantiateInline" body: "template" @@ -150,8 +143,7 @@ service WorkflowTemplateService { // Updates (replaces) workflow template. 
The updated template // must contain version that matches the current server version. - rpc UpdateWorkflowTemplate(UpdateWorkflowTemplateRequest) - returns (WorkflowTemplate) { + rpc UpdateWorkflowTemplate(UpdateWorkflowTemplateRequest) returns (WorkflowTemplate) { option (google.api.http) = { put: "/v1beta2/{template.name=projects/*/regions/*/workflowTemplates/*}" body: "template" @@ -164,8 +156,7 @@ service WorkflowTemplateService { } // Lists workflows that match the specified filter in the request. - rpc ListWorkflowTemplates(ListWorkflowTemplatesRequest) - returns (ListWorkflowTemplatesResponse) { + rpc ListWorkflowTemplates(ListWorkflowTemplatesRequest) returns (ListWorkflowTemplatesResponse) { option (google.api.http) = { get: "/v1beta2/{parent=projects/*/regions/*}/workflowTemplates" additional_bindings { @@ -176,8 +167,7 @@ service WorkflowTemplateService { } // Deletes a workflow template. It does not cancel in-progress workflows. - rpc DeleteWorkflowTemplate(DeleteWorkflowTemplateRequest) - returns (google.protobuf.Empty) { + rpc DeleteWorkflowTemplate(DeleteWorkflowTemplateRequest) returns (google.protobuf.Empty) { option (google.api.http) = { delete: "/v1beta2/{name=projects/*/regions/*/workflowTemplates/*}" additional_bindings { @@ -230,12 +220,10 @@ message WorkflowTemplate { int32 version = 3 [(google.api.field_behavior) = OPTIONAL]; // Output only. The time template was created. - google.protobuf.Timestamp create_time = 4 - [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp create_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The time template was last updated. - google.protobuf.Timestamp update_time = 5 - [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp update_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; // Optional. The labels to associate with this template. These labels // will be propagated to all jobs and clusters created by the workflow @@ -260,8 +248,7 @@ message WorkflowTemplate { // Optional. Template parameters whose values are substituted into the // template. Values for parameters must be provided when the template is // instantiated. - repeated TemplateParameter parameters = 9 - [(google.api.field_behavior) = OPTIONAL]; + repeated TemplateParameter parameters = 9 [(google.api.field_behavior) = OPTIONAL]; } // Specifies workflow execution target. @@ -329,13 +316,13 @@ message OrderedJob { // // The step id is used as prefix for job id, as job // `goog-dataproc-workflow-step-id` label, and in - // [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] - // field from other steps. + // [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field from other + // steps. // // The id must contain only letters (a-z, A-Z), numbers (0-9), // underscores (_), and hyphens (-). Cannot begin or end with underscore // or hyphen. Must consist of between 3 and 50 characters. - string step_id = 1; + string step_id = 1 [(google.api.field_behavior) = REQUIRED]; // Required. The job definition. oneof job_type { @@ -349,7 +336,13 @@ message OrderedJob { PigJob pig_job = 6; + // Spark R job + SparkRJob spark_r_job = 11; + SparkSqlJob spark_sql_job = 7; + + // Presto job + PrestoJob presto_job = 12; } // Optional. The labels to associate with this job. @@ -362,14 +355,14 @@ message OrderedJob { // the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} // // No more than 32 labels can be associated with a given job. 
- map<string, string> labels = 8; + map<string, string> labels = 8 [(google.api.field_behavior) = OPTIONAL]; // Optional. Job scheduling configuration. - JobScheduling scheduling = 9; + JobScheduling scheduling = 9 [(google.api.field_behavior) = OPTIONAL]; // Optional. The optional list of prerequisite job step_ids. // If not specified, the job will start at the beginning of workflow. - repeated string prerequisite_step_ids = 10; + repeated string prerequisite_step_ids = 10 [(google.api.field_behavior) = OPTIONAL]; } // A configurable parameter that replaces one or more fields in the template. @@ -395,10 +388,10 @@ message TemplateParameter { // A field is allowed to appear in at most one parameter's list of field // paths. // - // A field path is similar in syntax to a - // [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a - // field path that references the zone field of a workflow template's cluster - // selector would be specified as `placement.clusterSelector.zone`. + // A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask]. + // For example, a field path that references the zone field of a workflow + // template's cluster selector would be specified as + // `placement.clusterSelector.zone`. // // Also, field paths can reference fields using the following syntax: // @@ -505,15 +498,13 @@ message WorkflowMetadata { int32 version = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The create cluster operation metadata. - ClusterOperation create_cluster = 3 - [(google.api.field_behavior) = OUTPUT_ONLY]; + ClusterOperation create_cluster = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The workflow graph. WorkflowGraph graph = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The delete cluster operation metadata. - ClusterOperation delete_cluster = 5 - [(google.api.field_behavior) = OUTPUT_ONLY]; + ClusterOperation delete_cluster = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The workflow state. State state = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; @@ -525,12 +516,10 @@ message WorkflowMetadata { map<string, string> parameters = 8; // Output only. Workflow start time. - google.protobuf.Timestamp start_time = 9 - [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp start_time = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. Workflow end time. - google.protobuf.Timestamp end_time = 10 - [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp end_time = 10 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The UUID of target cluster. string cluster_uuid = 11 [(google.api.field_behavior) = OUTPUT_ONLY]; @@ -582,8 +571,7 @@ message WorkflowNode { string step_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. Node's prerequisite nodes. - repeated string prerequisite_step_ids = 2 - [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated string prerequisite_step_ids = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The job id; populated after the node enters RUNNING state. string job_id = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; @@ -765,8 +753,7 @@ message ListWorkflowTemplatesRequest { // A response to a request to list workflow templates in a project. message ListWorkflowTemplatesResponse { // Output only. WorkflowTemplates list. 
- repeated WorkflowTemplate templates = 1 - [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated WorkflowTemplate templates = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. This token is included in the response if there are more // results to fetch. To fetch additional results, provide this value as the diff --git a/packages/google-cloud-dataproc/protos/protos.d.ts b/packages/google-cloud-dataproc/protos/protos.d.ts index e8e129a72f6..352a0a84a48 100644 --- a/packages/google-cloud-dataproc/protos/protos.d.ts +++ b/packages/google-cloud-dataproc/protos/protos.d.ts @@ -4098,7 +4098,9 @@ export namespace google { ANACONDA = 5, HIVE_WEBHCAT = 3, JUPYTER = 1, - ZEPPELIN = 4 + PRESTO = 6, + ZEPPELIN = 4, + ZOOKEEPER = 8 } /** Represents a JobController */ @@ -4135,6 +4137,20 @@ export namespace google { */ public submitJob(request: google.cloud.dataproc.v1.ISubmitJobRequest): Promise; + /** + * Calls SubmitJobAsOperation. + * @param request SubmitJobRequest message or plain object + * @param callback Node-style callback called with the error, if any, and Operation + */ + public submitJobAsOperation(request: google.cloud.dataproc.v1.ISubmitJobRequest, callback: google.cloud.dataproc.v1.JobController.SubmitJobAsOperationCallback): void; + + /** + * Calls SubmitJobAsOperation. + * @param request SubmitJobRequest message or plain object + * @returns Promise + */ + public submitJobAsOperation(request: google.cloud.dataproc.v1.ISubmitJobRequest): Promise; + /** * Calls GetJob. * @param request GetJobRequest message or plain object @@ -4215,6 +4231,13 @@ export namespace google { */ type SubmitJobCallback = (error: (Error|null), response?: google.cloud.dataproc.v1.Job) => void; + /** + * Callback as used by {@link google.cloud.dataproc.v1.JobController#submitJobAsOperation}. + * @param error Error, if any + * @param [response] Operation + */ + type SubmitJobAsOperationCallback = (error: (Error|null), response?: google.longrunning.Operation) => void; + /** * Callback as used by {@link google.cloud.dataproc.v1.JobController#getJob}. * @param error Error, if any @@ -5978,6 +6001,9 @@ export namespace google { /** Job jobUuid */ jobUuid?: (string|null); + + /** Job done */ + done?: (boolean|null); } /** Represents a Job. */ @@ -6043,6 +6069,9 @@ export namespace google { /** Job jobUuid. */ public jobUuid: string; + /** Job done. */ + public done: boolean; + /** Job typeJob. */ public typeJob?: ("hadoopJob"|"sparkJob"|"pysparkJob"|"hiveJob"|"pigJob"|"sparkRJob"|"sparkSqlJob"|"prestoJob"); @@ -6315,6 +6344,114 @@ export namespace google { public toJSON(): { [k: string]: any }; } + /** Properties of a JobMetadata. */ + interface IJobMetadata { + + /** JobMetadata jobId */ + jobId?: (string|null); + + /** JobMetadata status */ + status?: (google.cloud.dataproc.v1.IJobStatus|null); + + /** JobMetadata operationType */ + operationType?: (string|null); + + /** JobMetadata startTime */ + startTime?: (google.protobuf.ITimestamp|null); + } + + /** Represents a JobMetadata. */ + class JobMetadata implements IJobMetadata { + + /** + * Constructs a new JobMetadata. + * @param [properties] Properties to set + */ + constructor(properties?: google.cloud.dataproc.v1.IJobMetadata); + + /** JobMetadata jobId. */ + public jobId: string; + + /** JobMetadata status. */ + public status?: (google.cloud.dataproc.v1.IJobStatus|null); + + /** JobMetadata operationType. */ + public operationType: string; + + /** JobMetadata startTime. 
*/ + public startTime?: (google.protobuf.ITimestamp|null); + + /** + * Creates a new JobMetadata instance using the specified properties. + * @param [properties] Properties to set + * @returns JobMetadata instance + */ + public static create(properties?: google.cloud.dataproc.v1.IJobMetadata): google.cloud.dataproc.v1.JobMetadata; + + /** + * Encodes the specified JobMetadata message. Does not implicitly {@link google.cloud.dataproc.v1.JobMetadata.verify|verify} messages. + * @param message JobMetadata message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.dataproc.v1.IJobMetadata, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified JobMetadata message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.JobMetadata.verify|verify} messages. + * @param message JobMetadata message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.dataproc.v1.IJobMetadata, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a JobMetadata message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns JobMetadata + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.dataproc.v1.JobMetadata; + + /** + * Decodes a JobMetadata message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns JobMetadata + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.dataproc.v1.JobMetadata; + + /** + * Verifies a JobMetadata message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a JobMetadata message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns JobMetadata + */ + public static fromObject(object: { [k: string]: any }): google.cloud.dataproc.v1.JobMetadata; + + /** + * Creates a plain object from a JobMetadata message. Also converts values to other types if specified. + * @param message JobMetadata + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: google.cloud.dataproc.v1.JobMetadata, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this JobMetadata to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + } + /** Properties of a GetJobRequest. 
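// ---------------------------------------------------------------------------
// Editorial sketch (TypeScript; not part of the generated diff): consuming
// the new SubmitJobAsOperation surface declared above. It assumes the
// generated static module is importable from '../protos/protos' (path is an
// assumption) and that a JobController stub already exists; all request
// values are placeholders.
import {google} from '../protos/protos';

async function submitAsOperation(stub: google.cloud.dataproc.v1.JobController) {
  const request: google.cloud.dataproc.v1.ISubmitJobRequest = {
    projectId: 'my-project',
    region: 'us-central1',
    job: {
      placement: {clusterName: 'my-cluster'},
      sparkJob: {mainClass: 'org.example.SparkMain'},
    },
  };
  // The promise overload resolves with a google.longrunning.Operation; per
  // this diff, its metadata is the new JobMetadata message.
  const operation = await stub.submitJobAsOperation(request);
  return operation;
}
// ---------------------------------------------------------------------------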
*/ interface IGetJobRequest { @@ -7845,9 +7982,15 @@ export namespace google { /** OrderedJob pigJob */ pigJob?: (google.cloud.dataproc.v1.IPigJob|null); + /** OrderedJob sparkRJob */ + sparkRJob?: (google.cloud.dataproc.v1.ISparkRJob|null); + /** OrderedJob sparkSqlJob */ sparkSqlJob?: (google.cloud.dataproc.v1.ISparkSqlJob|null); + /** OrderedJob prestoJob */ + prestoJob?: (google.cloud.dataproc.v1.IPrestoJob|null); + /** OrderedJob labels */ labels?: ({ [k: string]: string }|null); @@ -7885,9 +8028,15 @@ export namespace google { /** OrderedJob pigJob. */ public pigJob?: (google.cloud.dataproc.v1.IPigJob|null); + /** OrderedJob sparkRJob. */ + public sparkRJob?: (google.cloud.dataproc.v1.ISparkRJob|null); + /** OrderedJob sparkSqlJob. */ public sparkSqlJob?: (google.cloud.dataproc.v1.ISparkSqlJob|null); + /** OrderedJob prestoJob. */ + public prestoJob?: (google.cloud.dataproc.v1.IPrestoJob|null); + /** OrderedJob labels. */ public labels: { [k: string]: string }; @@ -7898,7 +8047,7 @@ export namespace google { public prerequisiteStepIds: string[]; /** OrderedJob jobType. */ - public jobType?: ("hadoopJob"|"sparkJob"|"pysparkJob"|"hiveJob"|"pigJob"|"sparkSqlJob"); + public jobType?: ("hadoopJob"|"sparkJob"|"pysparkJob"|"hiveJob"|"pigJob"|"sparkRJob"|"sparkSqlJob"|"prestoJob"); /** * Creates a new OrderedJob instance using the specified properties. @@ -11070,6 +11219,9 @@ export namespace google { /** ClusterConfig securityConfig */ securityConfig?: (google.cloud.dataproc.v1beta2.ISecurityConfig|null); + + /** ClusterConfig gkeClusterConfig */ + gkeClusterConfig?: (google.cloud.dataproc.v1beta2.IGkeClusterConfig|null); } /** Represents a ClusterConfig. */ @@ -11117,6 +11269,9 @@ export namespace google { /** ClusterConfig securityConfig. */ public securityConfig?: (google.cloud.dataproc.v1beta2.ISecurityConfig|null); + /** ClusterConfig gkeClusterConfig. */ + public gkeClusterConfig?: (google.cloud.dataproc.v1beta2.IGkeClusterConfig|null); + /** * Creates a new ClusterConfig instance using the specified properties. * @param [properties] Properties to set @@ -11188,6 +11343,195 @@ export namespace google { public toJSON(): { [k: string]: any }; } + /** Properties of a GkeClusterConfig. */ + interface IGkeClusterConfig { + + /** GkeClusterConfig namespacedGkeDeploymentTarget */ + namespacedGkeDeploymentTarget?: (google.cloud.dataproc.v1beta2.GkeClusterConfig.INamespacedGkeDeploymentTarget|null); + } + + /** Represents a GkeClusterConfig. */ + class GkeClusterConfig implements IGkeClusterConfig { + + /** + * Constructs a new GkeClusterConfig. + * @param [properties] Properties to set + */ + constructor(properties?: google.cloud.dataproc.v1beta2.IGkeClusterConfig); + + /** GkeClusterConfig namespacedGkeDeploymentTarget. */ + public namespacedGkeDeploymentTarget?: (google.cloud.dataproc.v1beta2.GkeClusterConfig.INamespacedGkeDeploymentTarget|null); + + /** + * Creates a new GkeClusterConfig instance using the specified properties. + * @param [properties] Properties to set + * @returns GkeClusterConfig instance + */ + public static create(properties?: google.cloud.dataproc.v1beta2.IGkeClusterConfig): google.cloud.dataproc.v1beta2.GkeClusterConfig; + + /** + * Encodes the specified GkeClusterConfig message. Does not implicitly {@link google.cloud.dataproc.v1beta2.GkeClusterConfig.verify|verify} messages. 
+ * @param message GkeClusterConfig message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.dataproc.v1beta2.IGkeClusterConfig, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified GkeClusterConfig message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1beta2.GkeClusterConfig.verify|verify} messages. + * @param message GkeClusterConfig message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.dataproc.v1beta2.IGkeClusterConfig, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a GkeClusterConfig message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns GkeClusterConfig + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.dataproc.v1beta2.GkeClusterConfig; + + /** + * Decodes a GkeClusterConfig message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns GkeClusterConfig + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.dataproc.v1beta2.GkeClusterConfig; + + /** + * Verifies a GkeClusterConfig message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a GkeClusterConfig message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns GkeClusterConfig + */ + public static fromObject(object: { [k: string]: any }): google.cloud.dataproc.v1beta2.GkeClusterConfig; + + /** + * Creates a plain object from a GkeClusterConfig message. Also converts values to other types if specified. + * @param message GkeClusterConfig + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: google.cloud.dataproc.v1beta2.GkeClusterConfig, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this GkeClusterConfig to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + } + + namespace GkeClusterConfig { + + /** Properties of a NamespacedGkeDeploymentTarget. */ + interface INamespacedGkeDeploymentTarget { + + /** NamespacedGkeDeploymentTarget targetGkeCluster */ + targetGkeCluster?: (string|null); + + /** NamespacedGkeDeploymentTarget clusterNamespace */ + clusterNamespace?: (string|null); + } + + /** Represents a NamespacedGkeDeploymentTarget. */ + class NamespacedGkeDeploymentTarget implements INamespacedGkeDeploymentTarget { + + /** + * Constructs a new NamespacedGkeDeploymentTarget. + * @param [properties] Properties to set + */ + constructor(properties?: google.cloud.dataproc.v1beta2.GkeClusterConfig.INamespacedGkeDeploymentTarget); + + /** NamespacedGkeDeploymentTarget targetGkeCluster. */ + public targetGkeCluster: string; + + /** NamespacedGkeDeploymentTarget clusterNamespace. 
*/ + public clusterNamespace: string; + + /** + * Creates a new NamespacedGkeDeploymentTarget instance using the specified properties. + * @param [properties] Properties to set + * @returns NamespacedGkeDeploymentTarget instance + */ + public static create(properties?: google.cloud.dataproc.v1beta2.GkeClusterConfig.INamespacedGkeDeploymentTarget): google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget; + + /** + * Encodes the specified NamespacedGkeDeploymentTarget message. Does not implicitly {@link google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget.verify|verify} messages. + * @param message NamespacedGkeDeploymentTarget message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.dataproc.v1beta2.GkeClusterConfig.INamespacedGkeDeploymentTarget, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified NamespacedGkeDeploymentTarget message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget.verify|verify} messages. + * @param message NamespacedGkeDeploymentTarget message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.dataproc.v1beta2.GkeClusterConfig.INamespacedGkeDeploymentTarget, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a NamespacedGkeDeploymentTarget message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns NamespacedGkeDeploymentTarget + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget; + + /** + * Decodes a NamespacedGkeDeploymentTarget message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns NamespacedGkeDeploymentTarget + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget; + + /** + * Verifies a NamespacedGkeDeploymentTarget message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a NamespacedGkeDeploymentTarget message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns NamespacedGkeDeploymentTarget + */ + public static fromObject(object: { [k: string]: any }): google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget; + + /** + * Creates a plain object from a NamespacedGkeDeploymentTarget message. Also converts values to other types if specified. 
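// ---------------------------------------------------------------------------
// Editorial sketch (TypeScript; not part of the generated diff): building the
// new v1beta2 GkeClusterConfig through the generated verify/fromObject
// helpers declared above. Module path and resource names are assumptions.
import {google} from '../protos/protos';

const {GkeClusterConfig} = google.cloud.dataproc.v1beta2;

const plainGkeConfig = {
  namespacedGkeDeploymentTarget: {
    targetGkeCluster: 'projects/my-project/locations/us-central1/clusters/my-gke-cluster',
    clusterNamespace: 'dataproc',
  },
};
// verify() returns null for a well-formed plain object, else a reason string.
console.log(GkeClusterConfig.verify(plainGkeConfig));                 // null
const gkeConfig = GkeClusterConfig.fromObject(plainGkeConfig);
console.log(gkeConfig.namespacedGkeDeploymentTarget?.clusterNamespace); // 'dataproc'
// ---------------------------------------------------------------------------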
+                 * @param message NamespacedGkeDeploymentTarget
+                 * @param [options] Conversion options
+                 * @returns Plain object
+                 */
+                public static toObject(message: google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget, options?: $protobuf.IConversionOptions): { [k: string]: any };
+
+                /**
+                 * Converts this NamespacedGkeDeploymentTarget to JSON.
+                 * @returns JSON object
+                 */
+                public toJSON(): { [k: string]: any };
+            }
+        }
+
         /** Properties of an EndpointConfig. */
         interface IEndpointConfig {
@@ -12622,7 +12966,10 @@
                 RUNNING = 2,
                 ERROR = 3,
                 DELETING = 4,
-                UPDATING = 5
+                UPDATING = 5,
+                STOPPING = 6,
+                STOPPED = 7,
+                STARTING = 8
             }

             /** Substate enum. */
@@ -13800,11 +14147,15 @@
             enum Component {
                 COMPONENT_UNSPECIFIED = 0,
                 ANACONDA = 5,
+                DOCKER = 13,
                 DRUID = 9,
+                FLINK = 14,
                 HIVE_WEBHCAT = 3,
                 JUPYTER = 1,
                 KERBEROS = 7,
                 PRESTO = 6,
+                RANGER = 12,
+                SOLR = 10,
                 ZEPPELIN = 4,
                 ZOOKEEPER = 8
             }
@@ -13843,6 +14194,20 @@
                  */
                 public submitJob(request: google.cloud.dataproc.v1beta2.ISubmitJobRequest): Promise<google.cloud.dataproc.v1beta2.Job>;

+                /**
+                 * Calls SubmitJobAsOperation.
+                 * @param request SubmitJobRequest message or plain object
+                 * @param callback Node-style callback called with the error, if any, and Operation
+                 */
+                public submitJobAsOperation(request: google.cloud.dataproc.v1beta2.ISubmitJobRequest, callback: google.cloud.dataproc.v1beta2.JobController.SubmitJobAsOperationCallback): void;
+
+                /**
+                 * Calls SubmitJobAsOperation.
+                 * @param request SubmitJobRequest message or plain object
+                 * @returns Promise
+                 */
+                public submitJobAsOperation(request: google.cloud.dataproc.v1beta2.ISubmitJobRequest): Promise<google.longrunning.Operation>;
+
                 /**
                  * Calls GetJob.
                  * @param request GetJobRequest message or plain object
@@ -13923,6 +14288,13 @@
                  */
                 type SubmitJobCallback = (error: (Error|null), response?: google.cloud.dataproc.v1beta2.Job) => void;

+                /**
+                 * Callback as used by {@link google.cloud.dataproc.v1beta2.JobController#submitJobAsOperation}.
+                 * @param error Error, if any
+                 * @param [response] Operation
+                 */
+                type SubmitJobAsOperationCallback = (error: (Error|null), response?: google.longrunning.Operation) => void;
+
                 /**
                  * Callback as used by {@link google.cloud.dataproc.v1beta2.JobController#getJob}.
                  * @param error Error, if any
@@ -15052,6 +15424,135 @@
             public toJSON(): { [k: string]: any };
         }

+        /** Properties of a PrestoJob. */
+        interface IPrestoJob {
+
+            /** PrestoJob queryFileUri */
+            queryFileUri?: (string|null);
+
+            /** PrestoJob queryList */
+            queryList?: (google.cloud.dataproc.v1beta2.IQueryList|null);
+
+            /** PrestoJob continueOnFailure */
+            continueOnFailure?: (boolean|null);
+
+            /** PrestoJob outputFormat */
+            outputFormat?: (string|null);
+
+            /** PrestoJob clientTags */
+            clientTags?: (string[]|null);
+
+            /** PrestoJob properties */
+            properties?: ({ [k: string]: string }|null);
+
+            /** PrestoJob loggingConfig */
+            loggingConfig?: (google.cloud.dataproc.v1beta2.ILoggingConfig|null);
+        }
+
+        /** Represents a PrestoJob. */
+        class PrestoJob implements IPrestoJob {
+
+            /**
+             * Constructs a new PrestoJob.
+             * @param [properties] Properties to set
+             */
+            constructor(properties?: google.cloud.dataproc.v1beta2.IPrestoJob);
+
+            /** PrestoJob queryFileUri. */
+            public queryFileUri: string;
+
+            /** PrestoJob queryList. */
+            public queryList?: (google.cloud.dataproc.v1beta2.IQueryList|null);
+
+            /** PrestoJob continueOnFailure. */
+            public continueOnFailure: boolean;
+
+            /** PrestoJob outputFormat.
*/ + public outputFormat: string; + + /** PrestoJob clientTags. */ + public clientTags: string[]; + + /** PrestoJob properties. */ + public properties: { [k: string]: string }; + + /** PrestoJob loggingConfig. */ + public loggingConfig?: (google.cloud.dataproc.v1beta2.ILoggingConfig|null); + + /** PrestoJob queries. */ + public queries?: ("queryFileUri"|"queryList"); + + /** + * Creates a new PrestoJob instance using the specified properties. + * @param [properties] Properties to set + * @returns PrestoJob instance + */ + public static create(properties?: google.cloud.dataproc.v1beta2.IPrestoJob): google.cloud.dataproc.v1beta2.PrestoJob; + + /** + * Encodes the specified PrestoJob message. Does not implicitly {@link google.cloud.dataproc.v1beta2.PrestoJob.verify|verify} messages. + * @param message PrestoJob message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.dataproc.v1beta2.IPrestoJob, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified PrestoJob message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1beta2.PrestoJob.verify|verify} messages. + * @param message PrestoJob message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.dataproc.v1beta2.IPrestoJob, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a PrestoJob message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns PrestoJob + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.dataproc.v1beta2.PrestoJob; + + /** + * Decodes a PrestoJob message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns PrestoJob + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.dataproc.v1beta2.PrestoJob; + + /** + * Verifies a PrestoJob message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a PrestoJob message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns PrestoJob + */ + public static fromObject(object: { [k: string]: any }): google.cloud.dataproc.v1beta2.PrestoJob; + + /** + * Creates a plain object from a PrestoJob message. Also converts values to other types if specified. + * @param message PrestoJob + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: google.cloud.dataproc.v1beta2.PrestoJob, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this PrestoJob to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + } + /** Properties of a JobPlacement. 
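// ---------------------------------------------------------------------------
// Editorial sketch (TypeScript; not part of the generated diff): the
// PrestoJob class above exposes a `queries` oneof, so queryFileUri and
// queryList are mutually exclusive. Module path and values are assumptions;
// the expected verify() message mirrors the generated oneof pattern.
import {google} from '../protos/protos';

const {PrestoJob} = google.cloud.dataproc.v1beta2;

const prestoJob = PrestoJob.create({
  queryList: {queries: ['SELECT 1']},
  outputFormat: 'csv',
  clientTags: ['reporting'],
});
console.log(prestoJob.queries);  // 'queryList' -- resolved by the oneof getter

// Supplying both oneof members in a plain object is rejected by verify():
console.log(PrestoJob.verify({
  queryFileUri: 'gs://my-bucket/query.sql',
  queryList: {queries: ['SELECT 1']},
}));  // 'queries: multiple values'
// ---------------------------------------------------------------------------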
*/ interface IJobPlacement { @@ -15531,6 +16032,9 @@ export namespace google { /** Job sparkSqlJob */ sparkSqlJob?: (google.cloud.dataproc.v1beta2.ISparkSqlJob|null); + /** Job prestoJob */ + prestoJob?: (google.cloud.dataproc.v1beta2.IPrestoJob|null); + /** Job status */ status?: (google.cloud.dataproc.v1beta2.IJobStatus|null); @@ -15557,6 +16061,9 @@ export namespace google { /** Job jobUuid */ jobUuid?: (string|null); + + /** Job done */ + done?: (boolean|null); } /** Represents a Job. */ @@ -15595,6 +16102,9 @@ export namespace google { /** Job sparkSqlJob. */ public sparkSqlJob?: (google.cloud.dataproc.v1beta2.ISparkSqlJob|null); + /** Job prestoJob. */ + public prestoJob?: (google.cloud.dataproc.v1beta2.IPrestoJob|null); + /** Job status. */ public status?: (google.cloud.dataproc.v1beta2.IJobStatus|null); @@ -15622,8 +16132,11 @@ export namespace google { /** Job jobUuid. */ public jobUuid: string; + /** Job done. */ + public done: boolean; + /** Job typeJob. */ - public typeJob?: ("hadoopJob"|"sparkJob"|"pysparkJob"|"hiveJob"|"pigJob"|"sparkRJob"|"sparkSqlJob"); + public typeJob?: ("hadoopJob"|"sparkJob"|"pysparkJob"|"hiveJob"|"pigJob"|"sparkRJob"|"sparkSqlJob"|"prestoJob"); /** * Creates a new Job instance using the specified properties. @@ -15786,6 +16299,114 @@ export namespace google { public toJSON(): { [k: string]: any }; } + /** Properties of a JobMetadata. */ + interface IJobMetadata { + + /** JobMetadata jobId */ + jobId?: (string|null); + + /** JobMetadata status */ + status?: (google.cloud.dataproc.v1beta2.IJobStatus|null); + + /** JobMetadata operationType */ + operationType?: (string|null); + + /** JobMetadata startTime */ + startTime?: (google.protobuf.ITimestamp|null); + } + + /** Represents a JobMetadata. */ + class JobMetadata implements IJobMetadata { + + /** + * Constructs a new JobMetadata. + * @param [properties] Properties to set + */ + constructor(properties?: google.cloud.dataproc.v1beta2.IJobMetadata); + + /** JobMetadata jobId. */ + public jobId: string; + + /** JobMetadata status. */ + public status?: (google.cloud.dataproc.v1beta2.IJobStatus|null); + + /** JobMetadata operationType. */ + public operationType: string; + + /** JobMetadata startTime. */ + public startTime?: (google.protobuf.ITimestamp|null); + + /** + * Creates a new JobMetadata instance using the specified properties. + * @param [properties] Properties to set + * @returns JobMetadata instance + */ + public static create(properties?: google.cloud.dataproc.v1beta2.IJobMetadata): google.cloud.dataproc.v1beta2.JobMetadata; + + /** + * Encodes the specified JobMetadata message. Does not implicitly {@link google.cloud.dataproc.v1beta2.JobMetadata.verify|verify} messages. + * @param message JobMetadata message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.dataproc.v1beta2.IJobMetadata, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified JobMetadata message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1beta2.JobMetadata.verify|verify} messages. + * @param message JobMetadata message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.dataproc.v1beta2.IJobMetadata, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a JobMetadata message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns JobMetadata + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.dataproc.v1beta2.JobMetadata; + + /** + * Decodes a JobMetadata message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns JobMetadata + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.dataproc.v1beta2.JobMetadata; + + /** + * Verifies a JobMetadata message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a JobMetadata message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns JobMetadata + */ + public static fromObject(object: { [k: string]: any }): google.cloud.dataproc.v1beta2.JobMetadata; + + /** + * Creates a plain object from a JobMetadata message. Also converts values to other types if specified. + * @param message JobMetadata + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: google.cloud.dataproc.v1beta2.JobMetadata, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this JobMetadata to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + } + /** Properties of a SubmitJobRequest. */ interface ISubmitJobRequest { @@ -17424,9 +18045,15 @@ export namespace google { /** OrderedJob pigJob */ pigJob?: (google.cloud.dataproc.v1beta2.IPigJob|null); + /** OrderedJob sparkRJob */ + sparkRJob?: (google.cloud.dataproc.v1beta2.ISparkRJob|null); + /** OrderedJob sparkSqlJob */ sparkSqlJob?: (google.cloud.dataproc.v1beta2.ISparkSqlJob|null); + /** OrderedJob prestoJob */ + prestoJob?: (google.cloud.dataproc.v1beta2.IPrestoJob|null); + /** OrderedJob labels */ labels?: ({ [k: string]: string }|null); @@ -17464,9 +18091,15 @@ export namespace google { /** OrderedJob pigJob. */ public pigJob?: (google.cloud.dataproc.v1beta2.IPigJob|null); + /** OrderedJob sparkRJob. */ + public sparkRJob?: (google.cloud.dataproc.v1beta2.ISparkRJob|null); + /** OrderedJob sparkSqlJob. */ public sparkSqlJob?: (google.cloud.dataproc.v1beta2.ISparkSqlJob|null); + /** OrderedJob prestoJob. */ + public prestoJob?: (google.cloud.dataproc.v1beta2.IPrestoJob|null); + /** OrderedJob labels. */ public labels: { [k: string]: string }; @@ -17477,7 +18110,7 @@ export namespace google { public prerequisiteStepIds: string[]; /** OrderedJob jobType. */ - public jobType?: ("hadoopJob"|"sparkJob"|"pysparkJob"|"hiveJob"|"pigJob"|"sparkSqlJob"); + public jobType?: ("hadoopJob"|"sparkJob"|"pysparkJob"|"hiveJob"|"pigJob"|"sparkRJob"|"sparkSqlJob"|"prestoJob"); /** * Creates a new OrderedJob instance using the specified properties. 
diff --git a/packages/google-cloud-dataproc/protos/protos.js b/packages/google-cloud-dataproc/protos/protos.js
index 610b28ca91f..689c248938b 100644
--- a/packages/google-cloud-dataproc/protos/protos.js
+++ b/packages/google-cloud-dataproc/protos/protos.js
@@ -6977,7 +6977,9 @@
                         case 5:
                         case 3:
                         case 1:
+                        case 6:
                         case 4:
+                        case 8:
                             break;
                         }
                     }
@@ -7028,10 +7030,18 @@
                     case 1:
                         message.optionalComponents[i] = 1;
                         break;
+                    case "PRESTO":
+                    case 6:
+                        message.optionalComponents[i] = 6;
+                        break;
                     case "ZEPPELIN":
                     case 4:
                         message.optionalComponents[i] = 4;
                         break;
+                    case "ZOOKEEPER":
+                    case 8:
+                        message.optionalComponents[i] = 8;
+                        break;
                     }
                 }
                 return message;
@@ -9984,7 +9994,9 @@
             * @property {number} ANACONDA=5 ANACONDA value
             * @property {number} HIVE_WEBHCAT=3 HIVE_WEBHCAT value
             * @property {number} JUPYTER=1 JUPYTER value
+            * @property {number} PRESTO=6 PRESTO value
             * @property {number} ZEPPELIN=4 ZEPPELIN value
+            * @property {number} ZOOKEEPER=8 ZOOKEEPER value
             */
            v1.Component = (function() {
                var valuesById = {}, values = Object.create(valuesById);
                values[valuesById[5] = "ANACONDA"] = 5;
                values[valuesById[3] = "HIVE_WEBHCAT"] = 3;
                values[valuesById[1] = "JUPYTER"] = 1;
+               values[valuesById[6] = "PRESTO"] = 6;
                values[valuesById[4] = "ZEPPELIN"] = 4;
+               values[valuesById[8] = "ZOOKEEPER"] = 8;
                return values;
            })();
@@ -10061,6 +10075,39 @@
             * @variation 2
             */

+            /**
+             * Callback as used by {@link google.cloud.dataproc.v1.JobController#submitJobAsOperation}.
+             * @memberof google.cloud.dataproc.v1.JobController
+             * @typedef SubmitJobAsOperationCallback
+             * @type {function}
+             * @param {Error|null} error Error, if any
+             * @param {google.longrunning.Operation} [response] Operation
+             */
+
+            /**
+             * Calls SubmitJobAsOperation.
+             * @function submitJobAsOperation
+             * @memberof google.cloud.dataproc.v1.JobController
+             * @instance
+             * @param {google.cloud.dataproc.v1.ISubmitJobRequest} request SubmitJobRequest message or plain object
+             * @param {google.cloud.dataproc.v1.JobController.SubmitJobAsOperationCallback} callback Node-style callback called with the error, if any, and Operation
+             * @returns {undefined}
+             * @variation 1
+             */
+            Object.defineProperty(JobController.prototype.submitJobAsOperation = function submitJobAsOperation(request, callback) {
+                return this.rpcCall(submitJobAsOperation, $root.google.cloud.dataproc.v1.SubmitJobRequest, $root.google.longrunning.Operation, request, callback);
+            }, "name", { value: "SubmitJobAsOperation" });
+
+            /**
+             * Calls SubmitJobAsOperation.
+             * @function submitJobAsOperation
+             * @memberof google.cloud.dataproc.v1.JobController
+             * @instance
+             * @param {google.cloud.dataproc.v1.ISubmitJobRequest} request SubmitJobRequest message or plain object
+             * @returns {Promise<google.longrunning.Operation>} Promise
+             * @variation 2
+             */
+
            /**
             * Callback as used by {@link google.cloud.dataproc.v1.JobController#getJob}.
             * @memberof google.cloud.dataproc.v1.JobController
@@ -15210,6 +15257,7 @@
             * @property {Object.<string,string>|null} [labels] Job labels
             * @property {google.cloud.dataproc.v1.IJobScheduling|null} [scheduling] Job scheduling
             * @property {string|null} [jobUuid] Job jobUuid
+            * @property {boolean|null} [done] Job done
             */

            /**
@@ -15374,6 +15422,14 @@
             */
            Job.prototype.jobUuid = "";

+           /**
+            * Job done.
+ * @member {boolean} done + * @memberof google.cloud.dataproc.v1.Job + * @instance + */ + Job.prototype.done = false; + // OneOf field names bound to virtual getters and setters var $oneOfFields; @@ -15451,6 +15507,8 @@ writer.uint32(/* id 22, wireType 2 =*/178).string(message.jobUuid); if (message.prestoJob != null && message.hasOwnProperty("prestoJob")) $root.google.cloud.dataproc.v1.PrestoJob.encode(message.prestoJob, writer.uint32(/* id 23, wireType 2 =*/186).fork()).ldelim(); + if (message.done != null && message.hasOwnProperty("done")) + writer.uint32(/* id 24, wireType 0 =*/192).bool(message.done); return writer; }; @@ -15548,6 +15606,9 @@ case 22: message.jobUuid = reader.string(); break; + case 24: + message.done = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -15717,6 +15778,9 @@ if (message.jobUuid != null && message.hasOwnProperty("jobUuid")) if (!$util.isString(message.jobUuid)) return "jobUuid: string expected"; + if (message.done != null && message.hasOwnProperty("done")) + if (typeof message.done !== "boolean") + return "done: boolean expected"; return null; }; @@ -15825,6 +15889,8 @@ } if (object.jobUuid != null) message.jobUuid = String(object.jobUuid); + if (object.done != null) + message.done = Boolean(object.done); return message; }; @@ -15855,6 +15921,7 @@ object.driverOutputResourceUri = ""; object.scheduling = null; object.jobUuid = ""; + object.done = false; } if (message.reference != null && message.hasOwnProperty("reference")) object.reference = $root.google.cloud.dataproc.v1.JobReference.toObject(message.reference, options); @@ -15926,6 +15993,8 @@ if (options.oneofs) object.typeJob = "prestoJob"; } + if (message.done != null && message.hasOwnProperty("done")) + object.done = message.done; return object; }; @@ -16389,6 +16458,270 @@ return SubmitJobRequest; })(); + v1.JobMetadata = (function() { + + /** + * Properties of a JobMetadata. + * @memberof google.cloud.dataproc.v1 + * @interface IJobMetadata + * @property {string|null} [jobId] JobMetadata jobId + * @property {google.cloud.dataproc.v1.IJobStatus|null} [status] JobMetadata status + * @property {string|null} [operationType] JobMetadata operationType + * @property {google.protobuf.ITimestamp|null} [startTime] JobMetadata startTime + */ + + /** + * Constructs a new JobMetadata. + * @memberof google.cloud.dataproc.v1 + * @classdesc Represents a JobMetadata. + * @implements IJobMetadata + * @constructor + * @param {google.cloud.dataproc.v1.IJobMetadata=} [properties] Properties to set + */ + function JobMetadata(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * JobMetadata jobId. + * @member {string} jobId + * @memberof google.cloud.dataproc.v1.JobMetadata + * @instance + */ + JobMetadata.prototype.jobId = ""; + + /** + * JobMetadata status. + * @member {google.cloud.dataproc.v1.IJobStatus|null|undefined} status + * @memberof google.cloud.dataproc.v1.JobMetadata + * @instance + */ + JobMetadata.prototype.status = null; + + /** + * JobMetadata operationType. + * @member {string} operationType + * @memberof google.cloud.dataproc.v1.JobMetadata + * @instance + */ + JobMetadata.prototype.operationType = ""; + + /** + * JobMetadata startTime. 
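// ---------------------------------------------------------------------------
// Editorial check (TypeScript; not part of the generated diff): the Job
// encoder above writes `done` with key 192. That constant is just the
// protobuf tag for field 24 with wire type 0 (varint, used for bool), which
// the writer emits as the varint bytes 0xC0 0x01 before the bool payload.
// Standalone arithmetic; no generated code required.
const fieldNumber = 24;
const wireType = 0;                         // varint (bool)
const key = (fieldNumber << 3) | wireType;
console.log(key);                           // 192
// Varint encoding of 192: low 7 bits with continuation bit, then the rest.
console.log([(key & 0x7f) | 0x80, key >>> 7]);  // [192, 1], i.e. 0xC0 0x01
// ---------------------------------------------------------------------------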
+ * @member {google.protobuf.ITimestamp|null|undefined} startTime + * @memberof google.cloud.dataproc.v1.JobMetadata + * @instance + */ + JobMetadata.prototype.startTime = null; + + /** + * Creates a new JobMetadata instance using the specified properties. + * @function create + * @memberof google.cloud.dataproc.v1.JobMetadata + * @static + * @param {google.cloud.dataproc.v1.IJobMetadata=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1.JobMetadata} JobMetadata instance + */ + JobMetadata.create = function create(properties) { + return new JobMetadata(properties); + }; + + /** + * Encodes the specified JobMetadata message. Does not implicitly {@link google.cloud.dataproc.v1.JobMetadata.verify|verify} messages. + * @function encode + * @memberof google.cloud.dataproc.v1.JobMetadata + * @static + * @param {google.cloud.dataproc.v1.IJobMetadata} message JobMetadata message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + JobMetadata.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.jobId != null && message.hasOwnProperty("jobId")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.jobId); + if (message.status != null && message.hasOwnProperty("status")) + $root.google.cloud.dataproc.v1.JobStatus.encode(message.status, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.operationType != null && message.hasOwnProperty("operationType")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.operationType); + if (message.startTime != null && message.hasOwnProperty("startTime")) + $root.google.protobuf.Timestamp.encode(message.startTime, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified JobMetadata message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1.JobMetadata.verify|verify} messages. + * @function encodeDelimited + * @memberof google.cloud.dataproc.v1.JobMetadata + * @static + * @param {google.cloud.dataproc.v1.IJobMetadata} message JobMetadata message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + JobMetadata.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a JobMetadata message from the specified reader or buffer. + * @function decode + * @memberof google.cloud.dataproc.v1.JobMetadata + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.dataproc.v1.JobMetadata} JobMetadata + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + JobMetadata.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1.JobMetadata();
+                while (reader.pos < end) {
+                    var tag = reader.uint32();
+                    switch (tag >>> 3) {
+                    case 1:
+                        message.jobId = reader.string();
+                        break;
+                    case 2:
+                        message.status = $root.google.cloud.dataproc.v1.JobStatus.decode(reader, reader.uint32());
+                        break;
+                    case 3:
+                        message.operationType = reader.string();
+                        break;
+                    case 4:
+                        message.startTime = $root.google.protobuf.Timestamp.decode(reader, reader.uint32());
+                        break;
+                    default:
+                        reader.skipType(tag & 7);
+                        break;
+                    }
+                }
+                return message;
+            };
+
+            /**
+             * Decodes a JobMetadata message from the specified reader or buffer, length delimited.
+             * @function decodeDelimited
+             * @memberof google.cloud.dataproc.v1.JobMetadata
+             * @static
+             * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+             * @returns {google.cloud.dataproc.v1.JobMetadata} JobMetadata
+             * @throws {Error} If the payload is not a reader or valid buffer
+             * @throws {$protobuf.util.ProtocolError} If required fields are missing
+             */
+            JobMetadata.decodeDelimited = function decodeDelimited(reader) {
+                if (!(reader instanceof $Reader))
+                    reader = new $Reader(reader);
+                return this.decode(reader, reader.uint32());
+            };
+
+            /**
+             * Verifies a JobMetadata message.
+             * @function verify
+             * @memberof google.cloud.dataproc.v1.JobMetadata
+             * @static
+             * @param {Object.<string,*>} message Plain object to verify
+             * @returns {string|null} `null` if valid, otherwise the reason why it is not
+             */
+            JobMetadata.verify = function verify(message) {
+                if (typeof message !== "object" || message === null)
+                    return "object expected";
+                if (message.jobId != null && message.hasOwnProperty("jobId"))
+                    if (!$util.isString(message.jobId))
+                        return "jobId: string expected";
+                if (message.status != null && message.hasOwnProperty("status")) {
+                    var error = $root.google.cloud.dataproc.v1.JobStatus.verify(message.status);
+                    if (error)
+                        return "status." + error;
+                }
+                if (message.operationType != null && message.hasOwnProperty("operationType"))
+                    if (!$util.isString(message.operationType))
+                        return "operationType: string expected";
+                if (message.startTime != null && message.hasOwnProperty("startTime")) {
+                    var error = $root.google.protobuf.Timestamp.verify(message.startTime);
+                    if (error)
+                        return "startTime." + error;
+                }
+                return null;
+            };
+
+            /**
+             * Creates a JobMetadata message from a plain object. Also converts values to their respective internal types.
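// ---------------------------------------------------------------------------
// Editorial sketch (TypeScript; not part of the generated diff): an
// encode/decode round trip through the JobMetadata codec defined above.
// Module path and field values are assumptions.
import {google} from '../protos/protos';

const {JobMetadata} = google.cloud.dataproc.v1;

const original = JobMetadata.create({
  jobId: 'job-123',
  operationType: 'SUBMIT',
  startTime: {seconds: 1700000000, nanos: 0},
});
const bytes = JobMetadata.encode(original).finish();
const decoded = JobMetadata.decode(bytes);
console.log(decoded.jobId, decoded.operationType);  // 'job-123' 'SUBMIT'
// ---------------------------------------------------------------------------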
+             * @function fromObject
+             * @memberof google.cloud.dataproc.v1.JobMetadata
+             * @static
+             * @param {Object.<string,*>} object Plain object
+             * @returns {google.cloud.dataproc.v1.JobMetadata} JobMetadata
+             */
+            JobMetadata.fromObject = function fromObject(object) {
+                if (object instanceof $root.google.cloud.dataproc.v1.JobMetadata)
+                    return object;
+                var message = new $root.google.cloud.dataproc.v1.JobMetadata();
+                if (object.jobId != null)
+                    message.jobId = String(object.jobId);
+                if (object.status != null) {
+                    if (typeof object.status !== "object")
+                        throw TypeError(".google.cloud.dataproc.v1.JobMetadata.status: object expected");
+                    message.status = $root.google.cloud.dataproc.v1.JobStatus.fromObject(object.status);
+                }
+                if (object.operationType != null)
+                    message.operationType = String(object.operationType);
+                if (object.startTime != null) {
+                    if (typeof object.startTime !== "object")
+                        throw TypeError(".google.cloud.dataproc.v1.JobMetadata.startTime: object expected");
+                    message.startTime = $root.google.protobuf.Timestamp.fromObject(object.startTime);
+                }
+                return message;
+            };
+
+            /**
+             * Creates a plain object from a JobMetadata message. Also converts values to other types if specified.
+             * @function toObject
+             * @memberof google.cloud.dataproc.v1.JobMetadata
+             * @static
+             * @param {google.cloud.dataproc.v1.JobMetadata} message JobMetadata
+             * @param {$protobuf.IConversionOptions} [options] Conversion options
+             * @returns {Object.<string,*>} Plain object
+             */
+            JobMetadata.toObject = function toObject(message, options) {
+                if (!options)
+                    options = {};
+                var object = {};
+                if (options.defaults) {
+                    object.jobId = "";
+                    object.status = null;
+                    object.operationType = "";
+                    object.startTime = null;
+                }
+                if (message.jobId != null && message.hasOwnProperty("jobId"))
+                    object.jobId = message.jobId;
+                if (message.status != null && message.hasOwnProperty("status"))
+                    object.status = $root.google.cloud.dataproc.v1.JobStatus.toObject(message.status, options);
+                if (message.operationType != null && message.hasOwnProperty("operationType"))
+                    object.operationType = message.operationType;
+                if (message.startTime != null && message.hasOwnProperty("startTime"))
+                    object.startTime = $root.google.protobuf.Timestamp.toObject(message.startTime, options);
+                return object;
+            };
+
+            /**
+             * Converts this JobMetadata to JSON.
+             * @function toJSON
+             * @memberof google.cloud.dataproc.v1.JobMetadata
+             * @instance
+             * @returns {Object.<string,*>} JSON object
+             */
+            JobMetadata.prototype.toJSON = function toJSON() {
+                return this.constructor.toObject(this, $protobuf.util.toJSONOptions);
+            };
+
+            return JobMetadata;
+        })();
+
        v1.GetJobRequest = (function() {

            /**
@@ -20122,7 +20455,9 @@
             * @property {google.cloud.dataproc.v1.IPySparkJob|null} [pysparkJob] OrderedJob pysparkJob
             * @property {google.cloud.dataproc.v1.IHiveJob|null} [hiveJob] OrderedJob hiveJob
             * @property {google.cloud.dataproc.v1.IPigJob|null} [pigJob] OrderedJob pigJob
+            * @property {google.cloud.dataproc.v1.ISparkRJob|null} [sparkRJob] OrderedJob sparkRJob
             * @property {google.cloud.dataproc.v1.ISparkSqlJob|null} [sparkSqlJob] OrderedJob sparkSqlJob
+            * @property {google.cloud.dataproc.v1.IPrestoJob|null} [prestoJob] OrderedJob prestoJob
             * @property {Object.<string,string>|null} [labels] OrderedJob labels
             * @property {google.cloud.dataproc.v1.IJobScheduling|null} [scheduling] OrderedJob scheduling
             * @property {Array.<string>|null} [prerequisiteStepIds] OrderedJob prerequisiteStepIds
@@ -20193,6 +20528,14 @@
             */
            OrderedJob.prototype.pigJob = null;

+           /**
+            * OrderedJob sparkRJob.
+            * @member {google.cloud.dataproc.v1.ISparkRJob|null|undefined} sparkRJob
+            * @memberof google.cloud.dataproc.v1.OrderedJob
+            * @instance
+            */
+           OrderedJob.prototype.sparkRJob = null;
+
            /**
             * OrderedJob sparkSqlJob.
             * @member {google.cloud.dataproc.v1.ISparkSqlJob|null|undefined} sparkSqlJob
@@ -20201,6 +20544,14 @@
             */
            OrderedJob.prototype.sparkSqlJob = null;

+           /**
+            * OrderedJob prestoJob.
+            * @member {google.cloud.dataproc.v1.IPrestoJob|null|undefined} prestoJob
+            * @memberof google.cloud.dataproc.v1.OrderedJob
+            * @instance
+            */
+           OrderedJob.prototype.prestoJob = null;
+
            /**
             * OrderedJob labels.
             * @member {Object.<string,string>} labels
             * @memberof google.cloud.dataproc.v1.OrderedJob
             * @instance
             */
@@ -20230,12 +20581,12 @@
            /**
             * OrderedJob jobType.
-            * @member {"hadoopJob"|"sparkJob"|"pysparkJob"|"hiveJob"|"pigJob"|"sparkSqlJob"|undefined} jobType
+            * @member {"hadoopJob"|"sparkJob"|"pysparkJob"|"hiveJob"|"pigJob"|"sparkRJob"|"sparkSqlJob"|"prestoJob"|undefined} jobType
             * @memberof google.cloud.dataproc.v1.OrderedJob
             * @instance
             */
            Object.defineProperty(OrderedJob.prototype, "jobType", {
-               get: $util.oneOfGetter($oneOfFields = ["hadoopJob", "sparkJob", "pysparkJob", "hiveJob", "pigJob", "sparkSqlJob"]),
+               get: $util.oneOfGetter($oneOfFields = ["hadoopJob", "sparkJob", "pysparkJob", "hiveJob", "pigJob", "sparkRJob", "sparkSqlJob", "prestoJob"]),
                set: $util.oneOfSetter($oneOfFields)
            });
@@ -20285,6 +20636,10 @@
                if (message.prerequisiteStepIds != null && message.prerequisiteStepIds.length)
                    for (var i = 0; i < message.prerequisiteStepIds.length; ++i)
                        writer.uint32(/* id 10, wireType 2 =*/82).string(message.prerequisiteStepIds[i]);
+               if (message.sparkRJob != null && message.hasOwnProperty("sparkRJob"))
+                   $root.google.cloud.dataproc.v1.SparkRJob.encode(message.sparkRJob, writer.uint32(/* id 11, wireType 2 =*/90).fork()).ldelim();
+               if (message.prestoJob != null && message.hasOwnProperty("prestoJob"))
+                   $root.google.cloud.dataproc.v1.PrestoJob.encode(message.prestoJob, writer.uint32(/* id 12, wireType 2 =*/98).fork()).ldelim();
                return writer;
            };
@@ -20337,9 +20692,15 @@
                    case 6:
                        message.pigJob = $root.google.cloud.dataproc.v1.PigJob.decode(reader, reader.uint32());
                        break;
+                   case 11:
+                       message.sparkRJob = $root.google.cloud.dataproc.v1.SparkRJob.decode(reader, reader.uint32());
+                       break;
                    case 7:
                        message.sparkSqlJob = $root.google.cloud.dataproc.v1.SparkSqlJob.decode(reader, reader.uint32());
                        break;
+                   case 12:
+                       message.prestoJob = $root.google.cloud.dataproc.v1.PrestoJob.decode(reader, reader.uint32());
+                       break;
                    case 8:
                        reader.skip().pos++;
                        if (message.labels === $util.emptyObject)
@@ -20443,6 +20804,16 @@
                        return "pigJob." + error;
                    }
                }
+               if (message.sparkRJob != null && message.hasOwnProperty("sparkRJob")) {
+                   if (properties.jobType === 1)
+                       return "jobType: multiple values";
+                   properties.jobType = 1;
+                   {
+                       var error = $root.google.cloud.dataproc.v1.SparkRJob.verify(message.sparkRJob);
+                       if (error)
+                           return "sparkRJob." + error;
+                   }
+               }
                if (message.sparkSqlJob != null && message.hasOwnProperty("sparkSqlJob")) {
                    if (properties.jobType === 1)
                        return "jobType: multiple values";
@@ -20453,6 +20824,16 @@
                        return "sparkSqlJob." + error;
                    }
                }
+               if (message.prestoJob != null && message.hasOwnProperty("prestoJob")) {
+                   if (properties.jobType === 1)
+                       return "jobType: multiple values";
+                   properties.jobType = 1;
+                   {
+                       var error = $root.google.cloud.dataproc.v1.PrestoJob.verify(message.prestoJob);
+                       if (error)
+                           return "prestoJob."
+ error; + } + } if (message.labels != null && message.hasOwnProperty("labels")) { if (!$util.isObject(message.labels)) return "labels: object expected"; @@ -20515,11 +20896,21 @@ throw TypeError(".google.cloud.dataproc.v1.OrderedJob.pigJob: object expected"); message.pigJob = $root.google.cloud.dataproc.v1.PigJob.fromObject(object.pigJob); } + if (object.sparkRJob != null) { + if (typeof object.sparkRJob !== "object") + throw TypeError(".google.cloud.dataproc.v1.OrderedJob.sparkRJob: object expected"); + message.sparkRJob = $root.google.cloud.dataproc.v1.SparkRJob.fromObject(object.sparkRJob); + } if (object.sparkSqlJob != null) { if (typeof object.sparkSqlJob !== "object") throw TypeError(".google.cloud.dataproc.v1.OrderedJob.sparkSqlJob: object expected"); message.sparkSqlJob = $root.google.cloud.dataproc.v1.SparkSqlJob.fromObject(object.sparkSqlJob); } + if (object.prestoJob != null) { + if (typeof object.prestoJob !== "object") + throw TypeError(".google.cloud.dataproc.v1.OrderedJob.prestoJob: object expected"); + message.prestoJob = $root.google.cloud.dataproc.v1.PrestoJob.fromObject(object.prestoJob); + } if (object.labels) { if (typeof object.labels !== "object") throw TypeError(".google.cloud.dataproc.v1.OrderedJob.labels: object expected"); @@ -20608,6 +20999,16 @@ for (var j = 0; j < message.prerequisiteStepIds.length; ++j) object.prerequisiteStepIds[j] = message.prerequisiteStepIds[j]; } + if (message.sparkRJob != null && message.hasOwnProperty("sparkRJob")) { + object.sparkRJob = $root.google.cloud.dataproc.v1.SparkRJob.toObject(message.sparkRJob, options); + if (options.oneofs) + object.jobType = "sparkRJob"; + } + if (message.prestoJob != null && message.hasOwnProperty("prestoJob")) { + object.prestoJob = $root.google.cloud.dataproc.v1.PrestoJob.toObject(message.prestoJob, options); + if (options.oneofs) + object.jobType = "prestoJob"; + } return object; }; @@ -27791,6 +28192,7 @@ * @property {google.cloud.dataproc.v1beta2.IAutoscalingConfig|null} [autoscalingConfig] ClusterConfig autoscalingConfig * @property {google.cloud.dataproc.v1beta2.IEndpointConfig|null} [endpointConfig] ClusterConfig endpointConfig * @property {google.cloud.dataproc.v1beta2.ISecurityConfig|null} [securityConfig] ClusterConfig securityConfig + * @property {google.cloud.dataproc.v1beta2.IGkeClusterConfig|null} [gkeClusterConfig] ClusterConfig gkeClusterConfig */ /** @@ -27905,6 +28307,14 @@ */ ClusterConfig.prototype.securityConfig = null; + /** + * ClusterConfig gkeClusterConfig. + * @member {google.cloud.dataproc.v1beta2.IGkeClusterConfig|null|undefined} gkeClusterConfig + * @memberof google.cloud.dataproc.v1beta2.ClusterConfig + * @instance + */ + ClusterConfig.prototype.gkeClusterConfig = null; + /** * Creates a new ClusterConfig instance using the specified properties. 
* @function create @@ -27954,6 +28364,8 @@ $root.google.cloud.dataproc.v1beta2.EndpointConfig.encode(message.endpointConfig, writer.uint32(/* id 17, wireType 2 =*/138).fork()).ldelim(); if (message.securityConfig != null && message.hasOwnProperty("securityConfig")) $root.google.cloud.dataproc.v1beta2.SecurityConfig.encode(message.securityConfig, writer.uint32(/* id 18, wireType 2 =*/146).fork()).ldelim(); + if (message.gkeClusterConfig != null && message.hasOwnProperty("gkeClusterConfig")) + $root.google.cloud.dataproc.v1beta2.GkeClusterConfig.encode(message.gkeClusterConfig, writer.uint32(/* id 19, wireType 2 =*/154).fork()).ldelim(); return writer; }; @@ -28026,6 +28438,9 @@ case 18: message.securityConfig = $root.google.cloud.dataproc.v1beta2.SecurityConfig.decode(reader, reader.uint32()); break; + case 19: + message.gkeClusterConfig = $root.google.cloud.dataproc.v1beta2.GkeClusterConfig.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -28123,6 +28538,11 @@ if (error) return "securityConfig." + error; } + if (message.gkeClusterConfig != null && message.hasOwnProperty("gkeClusterConfig")) { + var error = $root.google.cloud.dataproc.v1beta2.GkeClusterConfig.verify(message.gkeClusterConfig); + if (error) + return "gkeClusterConfig." + error; + } return null; }; @@ -28200,6 +28620,11 @@ throw TypeError(".google.cloud.dataproc.v1beta2.ClusterConfig.securityConfig: object expected"); message.securityConfig = $root.google.cloud.dataproc.v1beta2.SecurityConfig.fromObject(object.securityConfig); } + if (object.gkeClusterConfig != null) { + if (typeof object.gkeClusterConfig !== "object") + throw TypeError(".google.cloud.dataproc.v1beta2.ClusterConfig.gkeClusterConfig: object expected"); + message.gkeClusterConfig = $root.google.cloud.dataproc.v1beta2.GkeClusterConfig.fromObject(object.gkeClusterConfig); + } return message; }; @@ -28230,6 +28655,7 @@ object.autoscalingConfig = null; object.endpointConfig = null; object.securityConfig = null; + object.gkeClusterConfig = null; } if (message.configBucket != null && message.hasOwnProperty("configBucket")) object.configBucket = message.configBucket; @@ -28258,6 +28684,8 @@ object.endpointConfig = $root.google.cloud.dataproc.v1beta2.EndpointConfig.toObject(message.endpointConfig, options); if (message.securityConfig != null && message.hasOwnProperty("securityConfig")) object.securityConfig = $root.google.cloud.dataproc.v1beta2.SecurityConfig.toObject(message.securityConfig, options); + if (message.gkeClusterConfig != null && message.hasOwnProperty("gkeClusterConfig")) + object.gkeClusterConfig = $root.google.cloud.dataproc.v1beta2.GkeClusterConfig.toObject(message.gkeClusterConfig, options); return object; }; @@ -28275,6 +28703,408 @@ return ClusterConfig; })(); + v1beta2.GkeClusterConfig = (function() { + + /** + * Properties of a GkeClusterConfig. + * @memberof google.cloud.dataproc.v1beta2 + * @interface IGkeClusterConfig + * @property {google.cloud.dataproc.v1beta2.GkeClusterConfig.INamespacedGkeDeploymentTarget|null} [namespacedGkeDeploymentTarget] GkeClusterConfig namespacedGkeDeploymentTarget + */ + + /** + * Constructs a new GkeClusterConfig. + * @memberof google.cloud.dataproc.v1beta2 + * @classdesc Represents a GkeClusterConfig. 
+ * @implements IGkeClusterConfig + * @constructor + * @param {google.cloud.dataproc.v1beta2.IGkeClusterConfig=} [properties] Properties to set + */ + function GkeClusterConfig(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * GkeClusterConfig namespacedGkeDeploymentTarget. + * @member {google.cloud.dataproc.v1beta2.GkeClusterConfig.INamespacedGkeDeploymentTarget|null|undefined} namespacedGkeDeploymentTarget + * @memberof google.cloud.dataproc.v1beta2.GkeClusterConfig + * @instance + */ + GkeClusterConfig.prototype.namespacedGkeDeploymentTarget = null; + + /** + * Creates a new GkeClusterConfig instance using the specified properties. + * @function create + * @memberof google.cloud.dataproc.v1beta2.GkeClusterConfig + * @static + * @param {google.cloud.dataproc.v1beta2.IGkeClusterConfig=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1beta2.GkeClusterConfig} GkeClusterConfig instance + */ + GkeClusterConfig.create = function create(properties) { + return new GkeClusterConfig(properties); + }; + + /** + * Encodes the specified GkeClusterConfig message. Does not implicitly {@link google.cloud.dataproc.v1beta2.GkeClusterConfig.verify|verify} messages. + * @function encode + * @memberof google.cloud.dataproc.v1beta2.GkeClusterConfig + * @static + * @param {google.cloud.dataproc.v1beta2.IGkeClusterConfig} message GkeClusterConfig message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + GkeClusterConfig.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.namespacedGkeDeploymentTarget != null && message.hasOwnProperty("namespacedGkeDeploymentTarget")) + $root.google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget.encode(message.namespacedGkeDeploymentTarget, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified GkeClusterConfig message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1beta2.GkeClusterConfig.verify|verify} messages. + * @function encodeDelimited + * @memberof google.cloud.dataproc.v1beta2.GkeClusterConfig + * @static + * @param {google.cloud.dataproc.v1beta2.IGkeClusterConfig} message GkeClusterConfig message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + GkeClusterConfig.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a GkeClusterConfig message from the specified reader or buffer. + * @function decode + * @memberof google.cloud.dataproc.v1beta2.GkeClusterConfig + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.dataproc.v1beta2.GkeClusterConfig} GkeClusterConfig + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + GkeClusterConfig.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1beta2.GkeClusterConfig(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.namespacedGkeDeploymentTarget = $root.google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a GkeClusterConfig message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof google.cloud.dataproc.v1beta2.GkeClusterConfig + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {google.cloud.dataproc.v1beta2.GkeClusterConfig} GkeClusterConfig + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + GkeClusterConfig.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a GkeClusterConfig message. + * @function verify + * @memberof google.cloud.dataproc.v1beta2.GkeClusterConfig + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + GkeClusterConfig.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.namespacedGkeDeploymentTarget != null && message.hasOwnProperty("namespacedGkeDeploymentTarget")) { + var error = $root.google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget.verify(message.namespacedGkeDeploymentTarget); + if (error) + return "namespacedGkeDeploymentTarget." + error; + } + return null; + }; + + /** + * Creates a GkeClusterConfig message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof google.cloud.dataproc.v1beta2.GkeClusterConfig + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.dataproc.v1beta2.GkeClusterConfig} GkeClusterConfig + */ + GkeClusterConfig.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.dataproc.v1beta2.GkeClusterConfig) + return object; + var message = new $root.google.cloud.dataproc.v1beta2.GkeClusterConfig(); + if (object.namespacedGkeDeploymentTarget != null) { + if (typeof object.namespacedGkeDeploymentTarget !== "object") + throw TypeError(".google.cloud.dataproc.v1beta2.GkeClusterConfig.namespacedGkeDeploymentTarget: object expected"); + message.namespacedGkeDeploymentTarget = $root.google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget.fromObject(object.namespacedGkeDeploymentTarget); + } + return message; + }; + + /** + * Creates a plain object from a GkeClusterConfig message. Also converts values to other types if specified. 
+         * @function toObject
+         * @memberof google.cloud.dataproc.v1beta2.GkeClusterConfig
+         * @static
+         * @param {google.cloud.dataproc.v1beta2.GkeClusterConfig} message GkeClusterConfig
+         * @param {$protobuf.IConversionOptions} [options] Conversion options
+         * @returns {Object.<string,*>} Plain object
+         */
+        GkeClusterConfig.toObject = function toObject(message, options) {
+            if (!options)
+                options = {};
+            var object = {};
+            if (options.defaults)
+                object.namespacedGkeDeploymentTarget = null;
+            if (message.namespacedGkeDeploymentTarget != null && message.hasOwnProperty("namespacedGkeDeploymentTarget"))
+                object.namespacedGkeDeploymentTarget = $root.google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget.toObject(message.namespacedGkeDeploymentTarget, options);
+            return object;
+        };
+
+        /**
+         * Converts this GkeClusterConfig to JSON.
+         * @function toJSON
+         * @memberof google.cloud.dataproc.v1beta2.GkeClusterConfig
+         * @instance
+         * @returns {Object.<string,*>} JSON object
+         */
+        GkeClusterConfig.prototype.toJSON = function toJSON() {
+            return this.constructor.toObject(this, $protobuf.util.toJSONOptions);
+        };
+
+        GkeClusterConfig.NamespacedGkeDeploymentTarget = (function() {
+
+            /**
+             * Properties of a NamespacedGkeDeploymentTarget.
+             * @memberof google.cloud.dataproc.v1beta2.GkeClusterConfig
+             * @interface INamespacedGkeDeploymentTarget
+             * @property {string|null} [targetGkeCluster] NamespacedGkeDeploymentTarget targetGkeCluster
+             * @property {string|null} [clusterNamespace] NamespacedGkeDeploymentTarget clusterNamespace
+             */
+
+            /**
+             * Constructs a new NamespacedGkeDeploymentTarget.
+             * @memberof google.cloud.dataproc.v1beta2.GkeClusterConfig
+             * @classdesc Represents a NamespacedGkeDeploymentTarget.
+             * @implements INamespacedGkeDeploymentTarget
+             * @constructor
+             * @param {google.cloud.dataproc.v1beta2.GkeClusterConfig.INamespacedGkeDeploymentTarget=} [properties] Properties to set
+             */
+            function NamespacedGkeDeploymentTarget(properties) {
+                if (properties)
+                    for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i)
+                        if (properties[keys[i]] != null)
+                            this[keys[i]] = properties[keys[i]];
+            }
+
+            /**
+             * NamespacedGkeDeploymentTarget targetGkeCluster.
+             * @member {string} targetGkeCluster
+             * @memberof google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget
+             * @instance
+             */
+            NamespacedGkeDeploymentTarget.prototype.targetGkeCluster = "";
+
+            /**
+             * NamespacedGkeDeploymentTarget clusterNamespace.
+             * @member {string} clusterNamespace
+             * @memberof google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget
+             * @instance
+             */
+            NamespacedGkeDeploymentTarget.prototype.clusterNamespace = "";
+
+            /**
+             * Creates a new NamespacedGkeDeploymentTarget instance using the specified properties.
+             * @function create
+             * @memberof google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget
+             * @static
+             * @param {google.cloud.dataproc.v1beta2.GkeClusterConfig.INamespacedGkeDeploymentTarget=} [properties] Properties to set
+             * @returns {google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget} NamespacedGkeDeploymentTarget instance
+             */
+            NamespacedGkeDeploymentTarget.create = function create(properties) {
+                return new NamespacedGkeDeploymentTarget(properties);
+            };
+
+            /**
+             * Encodes the specified NamespacedGkeDeploymentTarget message. Does not implicitly {@link google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget.verify|verify} messages.
+ * @function encode + * @memberof google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget + * @static + * @param {google.cloud.dataproc.v1beta2.GkeClusterConfig.INamespacedGkeDeploymentTarget} message NamespacedGkeDeploymentTarget message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + NamespacedGkeDeploymentTarget.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.targetGkeCluster != null && message.hasOwnProperty("targetGkeCluster")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.targetGkeCluster); + if (message.clusterNamespace != null && message.hasOwnProperty("clusterNamespace")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.clusterNamespace); + return writer; + }; + + /** + * Encodes the specified NamespacedGkeDeploymentTarget message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget.verify|verify} messages. + * @function encodeDelimited + * @memberof google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget + * @static + * @param {google.cloud.dataproc.v1beta2.GkeClusterConfig.INamespacedGkeDeploymentTarget} message NamespacedGkeDeploymentTarget message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + NamespacedGkeDeploymentTarget.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a NamespacedGkeDeploymentTarget message from the specified reader or buffer. + * @function decode + * @memberof google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget} NamespacedGkeDeploymentTarget + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + NamespacedGkeDeploymentTarget.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.targetGkeCluster = reader.string(); + break; + case 2: + message.clusterNamespace = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a NamespacedGkeDeploymentTarget message from the specified reader or buffer, length delimited. 
+             * @function decodeDelimited
+             * @memberof google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget
+             * @static
+             * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+             * @returns {google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget} NamespacedGkeDeploymentTarget
+             * @throws {Error} If the payload is not a reader or valid buffer
+             * @throws {$protobuf.util.ProtocolError} If required fields are missing
+             */
+            NamespacedGkeDeploymentTarget.decodeDelimited = function decodeDelimited(reader) {
+                if (!(reader instanceof $Reader))
+                    reader = new $Reader(reader);
+                return this.decode(reader, reader.uint32());
+            };
+
+            /**
+             * Verifies a NamespacedGkeDeploymentTarget message.
+             * @function verify
+             * @memberof google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget
+             * @static
+             * @param {Object.<string,*>} message Plain object to verify
+             * @returns {string|null} `null` if valid, otherwise the reason why it is not
+             */
+            NamespacedGkeDeploymentTarget.verify = function verify(message) {
+                if (typeof message !== "object" || message === null)
+                    return "object expected";
+                if (message.targetGkeCluster != null && message.hasOwnProperty("targetGkeCluster"))
+                    if (!$util.isString(message.targetGkeCluster))
+                        return "targetGkeCluster: string expected";
+                if (message.clusterNamespace != null && message.hasOwnProperty("clusterNamespace"))
+                    if (!$util.isString(message.clusterNamespace))
+                        return "clusterNamespace: string expected";
+                return null;
+            };
+
+            /**
+             * Creates a NamespacedGkeDeploymentTarget message from a plain object. Also converts values to their respective internal types.
+             * @function fromObject
+             * @memberof google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget
+             * @static
+             * @param {Object.<string,*>} object Plain object
+             * @returns {google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget} NamespacedGkeDeploymentTarget
+             */
+            NamespacedGkeDeploymentTarget.fromObject = function fromObject(object) {
+                if (object instanceof $root.google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget)
+                    return object;
+                var message = new $root.google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget();
+                if (object.targetGkeCluster != null)
+                    message.targetGkeCluster = String(object.targetGkeCluster);
+                if (object.clusterNamespace != null)
+                    message.clusterNamespace = String(object.clusterNamespace);
+                return message;
+            };
+
+            /**
+             * Creates a plain object from a NamespacedGkeDeploymentTarget message. Also converts values to other types if specified.
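+             *
+             * Sketch only: judging by the container.googleapis.com/Cluster
+             * resource reference declared for this field in protos.json,
+             * `targetGkeCluster` is expected to be a full GKE cluster resource
+             * name; the values below are hypothetical.
+             *
+             * @example
+             * var target = NamespacedGkeDeploymentTarget.fromObject({
+             *     targetGkeCluster: "projects/my-project/locations/us-central1/clusters/gke-1",
+             *     clusterNamespace: "dataproc"
+             * });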
+             * @function toObject
+             * @memberof google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget
+             * @static
+             * @param {google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget} message NamespacedGkeDeploymentTarget
+             * @param {$protobuf.IConversionOptions} [options] Conversion options
+             * @returns {Object.<string,*>} Plain object
+             */
+            NamespacedGkeDeploymentTarget.toObject = function toObject(message, options) {
+                if (!options)
+                    options = {};
+                var object = {};
+                if (options.defaults) {
+                    object.targetGkeCluster = "";
+                    object.clusterNamespace = "";
+                }
+                if (message.targetGkeCluster != null && message.hasOwnProperty("targetGkeCluster"))
+                    object.targetGkeCluster = message.targetGkeCluster;
+                if (message.clusterNamespace != null && message.hasOwnProperty("clusterNamespace"))
+                    object.clusterNamespace = message.clusterNamespace;
+                return object;
+            };
+
+            /**
+             * Converts this NamespacedGkeDeploymentTarget to JSON.
+             * @function toJSON
+             * @memberof google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget
+             * @instance
+             * @returns {Object.<string,*>} JSON object
+             */
+            NamespacedGkeDeploymentTarget.prototype.toJSON = function toJSON() {
+                return this.constructor.toObject(this, $protobuf.util.toJSONOptions);
+            };
+
+            return NamespacedGkeDeploymentTarget;
+        })();
+
+        return GkeClusterConfig;
+    })();
+
     v1beta2.EndpointConfig = (function() {

         /**
@@ -31756,6 +32586,9 @@
                 case 3:
                 case 4:
                 case 5:
+                case 6:
+                case 7:
+                case 8:
                     break;
                 }
                 if (message.detail != null && message.hasOwnProperty("detail"))
@@ -31815,6 +32648,18 @@
             case 5:
                 message.state = 5;
                 break;
+            case "STOPPING":
+            case 6:
+                message.state = 6;
+                break;
+            case "STOPPED":
+            case 7:
+                message.state = 7;
+                break;
+            case "STARTING":
+            case 8:
+                message.state = 8;
+                break;
             }
             if (object.detail != null)
                 message.detail = String(object.detail);
@@ -31891,6 +32736,9 @@
          * @property {number} ERROR=3 ERROR value
          * @property {number} DELETING=4 DELETING value
          * @property {number} UPDATING=5 UPDATING value
+         * @property {number} STOPPING=6 STOPPING value
+         * @property {number} STOPPED=7 STOPPED value
+         * @property {number} STARTING=8 STARTING value
          */
         ClusterStatus.State = (function() {
             var valuesById = {}, values = Object.create(valuesById);
@@ -31900,6 +32748,9 @@
             values[valuesById[3] = "ERROR"] = 3;
             values[valuesById[4] = "DELETING"] = 4;
             values[valuesById[5] = "UPDATING"] = 5;
+            values[valuesById[6] = "STOPPING"] = 6;
+            values[valuesById[7] = "STOPPED"] = 7;
+            values[valuesById[8] = "STARTING"] = 8;
             return values;
         })();
@@ -32119,11 +32970,15 @@
                         return "optionalComponents: enum value[] expected";
                     case 0:
                     case 5:
+                    case 13:
                     case 9:
+                    case 14:
                     case 3:
                     case 1:
                     case 7:
                     case 6:
+                    case 12:
+                    case 10:
                     case 4:
                     case 8:
                         break;
@@ -32168,10 +33023,18 @@
                     case 5:
                         message.optionalComponents[i] = 5;
                         break;
+                    case "DOCKER":
+                    case 13:
+                        message.optionalComponents[i] = 13;
+                        break;
                     case "DRUID":
                     case 9:
                         message.optionalComponents[i] = 9;
                         break;
+                    case "FLINK":
+                    case 14:
+                        message.optionalComponents[i] = 14;
+                        break;
                     case "HIVE_WEBHCAT":
                     case 3:
                         message.optionalComponents[i] = 3;
@@ -32188,6 +33051,14 @@
                     case 6:
                         message.optionalComponents[i] = 6;
                         break;
+                    case "RANGER":
+                    case 12:
+                        message.optionalComponents[i] = 12;
+                        break;
+                    case "SOLR":
+                    case 10:
+                        message.optionalComponents[i] = 10;
+                        break;
                     case "ZEPPELIN":
                     case 4:
                         message.optionalComponents[i] = 4;
                         break;
@@ -34845,11 +35716,15 @@
          * @enum {string}
          * @property {number} COMPONENT_UNSPECIFIED=0 COMPONENT_UNSPECIFIED value
          * @property {number} ANACONDA=5 ANACONDA value
+         * @property {number} DOCKER=13 DOCKER value
          * @property {number} DRUID=9 DRUID value
+         * @property {number} FLINK=14 FLINK value
          * @property {number} HIVE_WEBHCAT=3 HIVE_WEBHCAT value
          * @property {number} JUPYTER=1 JUPYTER value
          * @property {number} KERBEROS=7 KERBEROS value
          * @property {number} PRESTO=6 PRESTO value
+         * @property {number} RANGER=12 RANGER value
+         * @property {number} SOLR=10 SOLR value
          * @property {number} ZEPPELIN=4 ZEPPELIN value
          * @property {number} ZOOKEEPER=8 ZOOKEEPER value
          */
@@ -34857,11 +35732,15 @@
             var valuesById = {}, values = Object.create(valuesById);
             values[valuesById[0] = "COMPONENT_UNSPECIFIED"] = 0;
             values[valuesById[5] = "ANACONDA"] = 5;
+            values[valuesById[13] = "DOCKER"] = 13;
             values[valuesById[9] = "DRUID"] = 9;
+            values[valuesById[14] = "FLINK"] = 14;
             values[valuesById[3] = "HIVE_WEBHCAT"] = 3;
             values[valuesById[1] = "JUPYTER"] = 1;
             values[valuesById[7] = "KERBEROS"] = 7;
             values[valuesById[6] = "PRESTO"] = 6;
+            values[valuesById[12] = "RANGER"] = 12;
+            values[valuesById[10] = "SOLR"] = 10;
             values[valuesById[4] = "ZEPPELIN"] = 4;
             values[valuesById[8] = "ZOOKEEPER"] = 8;
             return values;
@@ -34932,6 +35811,39 @@
          * @variation 2
          */

+        /**
+         * Callback as used by {@link google.cloud.dataproc.v1beta2.JobController#submitJobAsOperation}.
+         * @memberof google.cloud.dataproc.v1beta2.JobController
+         * @typedef SubmitJobAsOperationCallback
+         * @type {function}
+         * @param {Error|null} error Error, if any
+         * @param {google.longrunning.Operation} [response] Operation
+         */
+
+        /**
+         * Calls SubmitJobAsOperation.
+         * @function submitJobAsOperation
+         * @memberof google.cloud.dataproc.v1beta2.JobController
+         * @instance
+         * @param {google.cloud.dataproc.v1beta2.ISubmitJobRequest} request SubmitJobRequest message or plain object
+         * @param {google.cloud.dataproc.v1beta2.JobController.SubmitJobAsOperationCallback} callback Node-style callback called with the error, if any, and Operation
+         * @returns {undefined}
+         * @variation 1
+         */
+        Object.defineProperty(JobController.prototype.submitJobAsOperation = function submitJobAsOperation(request, callback) {
+            return this.rpcCall(submitJobAsOperation, $root.google.cloud.dataproc.v1beta2.SubmitJobRequest, $root.google.longrunning.Operation, request, callback);
+        }, "name", { value: "SubmitJobAsOperation" });
+
+        /**
+         * Calls SubmitJobAsOperation.
+         * @function submitJobAsOperation
+         * @memberof google.cloud.dataproc.v1beta2.JobController
+         * @instance
+         * @param {google.cloud.dataproc.v1beta2.ISubmitJobRequest} request SubmitJobRequest message or plain object
+         * @returns {Promise<google.longrunning.Operation>} Promise
+         * @variation 2
+         */
+
         /**
          * Callback as used by {@link google.cloud.dataproc.v1beta2.JobController#getJob}.
          * @memberof google.cloud.dataproc.v1beta2.JobController
@@ -38529,6 +39441,401 @@
         return SparkRJob;
     })();

+    v1beta2.PrestoJob = (function() {
+
+        /**
+         * Properties of a PrestoJob.
+         * @memberof google.cloud.dataproc.v1beta2
+         * @interface IPrestoJob
+         * @property {string|null} [queryFileUri] PrestoJob queryFileUri
+         * @property {google.cloud.dataproc.v1beta2.IQueryList|null} [queryList] PrestoJob queryList
+         * @property {boolean|null} [continueOnFailure] PrestoJob continueOnFailure
+         * @property {string|null} [outputFormat] PrestoJob outputFormat
+         * @property {Array.<string>|null} [clientTags] PrestoJob clientTags
+         * @property {Object.<string,string>|null} [properties] PrestoJob properties
+         * @property {google.cloud.dataproc.v1beta2.ILoggingConfig|null} [loggingConfig] PrestoJob loggingConfig
+         */
+
+        /**
+         * Constructs a new PrestoJob.
+         * @memberof google.cloud.dataproc.v1beta2
+         * @classdesc Represents a PrestoJob.
+         * @implements IPrestoJob
+         * @constructor
+         * @param {google.cloud.dataproc.v1beta2.IPrestoJob=} [properties] Properties to set
+         */
+        function PrestoJob(properties) {
+            this.clientTags = [];
+            this.properties = {};
+            if (properties)
+                for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i)
+                    if (properties[keys[i]] != null)
+                        this[keys[i]] = properties[keys[i]];
+        }
+
+        /**
+         * PrestoJob queryFileUri.
+         * @member {string} queryFileUri
+         * @memberof google.cloud.dataproc.v1beta2.PrestoJob
+         * @instance
+         */
+        PrestoJob.prototype.queryFileUri = "";
+
+        /**
+         * PrestoJob queryList.
+         * @member {google.cloud.dataproc.v1beta2.IQueryList|null|undefined} queryList
+         * @memberof google.cloud.dataproc.v1beta2.PrestoJob
+         * @instance
+         */
+        PrestoJob.prototype.queryList = null;
+
+        /**
+         * PrestoJob continueOnFailure.
+         * @member {boolean} continueOnFailure
+         * @memberof google.cloud.dataproc.v1beta2.PrestoJob
+         * @instance
+         */
+        PrestoJob.prototype.continueOnFailure = false;
+
+        /**
+         * PrestoJob outputFormat.
+         * @member {string} outputFormat
+         * @memberof google.cloud.dataproc.v1beta2.PrestoJob
+         * @instance
+         */
+        PrestoJob.prototype.outputFormat = "";
+
+        /**
+         * PrestoJob clientTags.
+         * @member {Array.<string>} clientTags
+         * @memberof google.cloud.dataproc.v1beta2.PrestoJob
+         * @instance
+         */
+        PrestoJob.prototype.clientTags = $util.emptyArray;
+
+        /**
+         * PrestoJob properties.
+         * @member {Object.<string,string>} properties
+         * @memberof google.cloud.dataproc.v1beta2.PrestoJob
+         * @instance
+         */
+        PrestoJob.prototype.properties = $util.emptyObject;
+
+        /**
+         * PrestoJob loggingConfig.
+         * @member {google.cloud.dataproc.v1beta2.ILoggingConfig|null|undefined} loggingConfig
+         * @memberof google.cloud.dataproc.v1beta2.PrestoJob
+         * @instance
+         */
+        PrestoJob.prototype.loggingConfig = null;
+
+        // OneOf field names bound to virtual getters and setters
+        var $oneOfFields;
+
+        /**
+         * PrestoJob queries.
+         * @member {"queryFileUri"|"queryList"|undefined} queries
+         * @memberof google.cloud.dataproc.v1beta2.PrestoJob
+         * @instance
+         */
+        Object.defineProperty(PrestoJob.prototype, "queries", {
+            get: $util.oneOfGetter($oneOfFields = ["queryFileUri", "queryList"]),
+            set: $util.oneOfSetter($oneOfFields)
+        });
+
+        /**
+         * Creates a new PrestoJob instance using the specified properties.
+         * @function create
+         * @memberof google.cloud.dataproc.v1beta2.PrestoJob
+         * @static
+         * @param {google.cloud.dataproc.v1beta2.IPrestoJob=} [properties] Properties to set
+         * @returns {google.cloud.dataproc.v1beta2.PrestoJob} PrestoJob instance
+         */
+        PrestoJob.create = function create(properties) {
+            return new PrestoJob(properties);
+        };
+
+        /**
+         * Encodes the specified PrestoJob message. Does not implicitly {@link google.cloud.dataproc.v1beta2.PrestoJob.verify|verify} messages.
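+         *
+         * Note on the `queries` oneof above: `queryFileUri` and `queryList` are
+         * plain properties, so assigning the virtual `queries` property is what
+         * clears the other member. A hypothetical sketch of the usual protobuf.js
+         * oneof handling, not API-specific behavior:
+         *
+         * @example
+         * var job = PrestoJob.create({ queryFileUri: "gs://my-bucket/query.sql" });
+         * job.queryList = { queries: ["SELECT 1"] };
+         * job.queries = "queryList"; // deletes queryFileUri so only one member stays set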
+ * @function encode + * @memberof google.cloud.dataproc.v1beta2.PrestoJob + * @static + * @param {google.cloud.dataproc.v1beta2.IPrestoJob} message PrestoJob message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + PrestoJob.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.queryFileUri != null && message.hasOwnProperty("queryFileUri")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.queryFileUri); + if (message.queryList != null && message.hasOwnProperty("queryList")) + $root.google.cloud.dataproc.v1beta2.QueryList.encode(message.queryList, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.continueOnFailure != null && message.hasOwnProperty("continueOnFailure")) + writer.uint32(/* id 3, wireType 0 =*/24).bool(message.continueOnFailure); + if (message.outputFormat != null && message.hasOwnProperty("outputFormat")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.outputFormat); + if (message.clientTags != null && message.clientTags.length) + for (var i = 0; i < message.clientTags.length; ++i) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.clientTags[i]); + if (message.properties != null && message.hasOwnProperty("properties")) + for (var keys = Object.keys(message.properties), i = 0; i < keys.length; ++i) + writer.uint32(/* id 6, wireType 2 =*/50).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 2 =*/18).string(message.properties[keys[i]]).ldelim(); + if (message.loggingConfig != null && message.hasOwnProperty("loggingConfig")) + $root.google.cloud.dataproc.v1beta2.LoggingConfig.encode(message.loggingConfig, writer.uint32(/* id 7, wireType 2 =*/58).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified PrestoJob message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1beta2.PrestoJob.verify|verify} messages. + * @function encodeDelimited + * @memberof google.cloud.dataproc.v1beta2.PrestoJob + * @static + * @param {google.cloud.dataproc.v1beta2.IPrestoJob} message PrestoJob message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + PrestoJob.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a PrestoJob message from the specified reader or buffer. + * @function decode + * @memberof google.cloud.dataproc.v1beta2.PrestoJob + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.dataproc.v1beta2.PrestoJob} PrestoJob + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + PrestoJob.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1beta2.PrestoJob(), key;
+            while (reader.pos < end) {
+                var tag = reader.uint32();
+                switch (tag >>> 3) {
+                case 1:
+                    message.queryFileUri = reader.string();
+                    break;
+                case 2:
+                    message.queryList = $root.google.cloud.dataproc.v1beta2.QueryList.decode(reader, reader.uint32());
+                    break;
+                case 3:
+                    message.continueOnFailure = reader.bool();
+                    break;
+                case 4:
+                    message.outputFormat = reader.string();
+                    break;
+                case 5:
+                    if (!(message.clientTags && message.clientTags.length))
+                        message.clientTags = [];
+                    message.clientTags.push(reader.string());
+                    break;
+                case 6:
+                    reader.skip().pos++;
+                    if (message.properties === $util.emptyObject)
+                        message.properties = {};
+                    key = reader.string();
+                    reader.pos++;
+                    message.properties[key] = reader.string();
+                    break;
+                case 7:
+                    message.loggingConfig = $root.google.cloud.dataproc.v1beta2.LoggingConfig.decode(reader, reader.uint32());
+                    break;
+                default:
+                    reader.skipType(tag & 7);
+                    break;
+                }
+            }
+            return message;
+        };
+
+        /**
+         * Decodes a PrestoJob message from the specified reader or buffer, length delimited.
+         * @function decodeDelimited
+         * @memberof google.cloud.dataproc.v1beta2.PrestoJob
+         * @static
+         * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+         * @returns {google.cloud.dataproc.v1beta2.PrestoJob} PrestoJob
+         * @throws {Error} If the payload is not a reader or valid buffer
+         * @throws {$protobuf.util.ProtocolError} If required fields are missing
+         */
+        PrestoJob.decodeDelimited = function decodeDelimited(reader) {
+            if (!(reader instanceof $Reader))
+                reader = new $Reader(reader);
+            return this.decode(reader, reader.uint32());
+        };
+
+        /**
+         * Verifies a PrestoJob message.
+         * @function verify
+         * @memberof google.cloud.dataproc.v1beta2.PrestoJob
+         * @static
+         * @param {Object.<string,*>} message Plain object to verify
+         * @returns {string|null} `null` if valid, otherwise the reason why it is not
+         */
+        PrestoJob.verify = function verify(message) {
+            if (typeof message !== "object" || message === null)
+                return "object expected";
+            var properties = {};
+            if (message.queryFileUri != null && message.hasOwnProperty("queryFileUri")) {
+                properties.queries = 1;
+                if (!$util.isString(message.queryFileUri))
+                    return "queryFileUri: string expected";
+            }
+            if (message.queryList != null && message.hasOwnProperty("queryList")) {
+                if (properties.queries === 1)
+                    return "queries: multiple values";
+                properties.queries = 1;
+                {
+                    var error = $root.google.cloud.dataproc.v1beta2.QueryList.verify(message.queryList);
+                    if (error)
+                        return "queryList." + error;
+                }
+            }
+            if (message.continueOnFailure != null && message.hasOwnProperty("continueOnFailure"))
+                if (typeof message.continueOnFailure !== "boolean")
+                    return "continueOnFailure: boolean expected";
+            if (message.outputFormat != null && message.hasOwnProperty("outputFormat"))
+                if (!$util.isString(message.outputFormat))
+                    return "outputFormat: string expected";
+            if (message.clientTags != null && message.hasOwnProperty("clientTags")) {
+                if (!Array.isArray(message.clientTags))
+                    return "clientTags: array expected";
+                for (var i = 0; i < message.clientTags.length; ++i)
+                    if (!$util.isString(message.clientTags[i]))
+                        return "clientTags: string[] expected";
+            }
+            if (message.properties != null && message.hasOwnProperty("properties")) {
+                if (!$util.isObject(message.properties))
+                    return "properties: object expected";
+                var key = Object.keys(message.properties);
+                for (var i = 0; i < key.length; ++i)
+                    if (!$util.isString(message.properties[key[i]]))
+                        return "properties: string{k:string} expected";
+            }
+            if (message.loggingConfig != null && message.hasOwnProperty("loggingConfig")) {
+                var error = $root.google.cloud.dataproc.v1beta2.LoggingConfig.verify(message.loggingConfig);
+                if (error)
+                    return "loggingConfig." + error;
+            }
+            return null;
+        };
+
+        /**
+         * Creates a PrestoJob message from a plain object. Also converts values to their respective internal types.
+         * @function fromObject
+         * @memberof google.cloud.dataproc.v1beta2.PrestoJob
+         * @static
+         * @param {Object.<string,*>} object Plain object
+         * @returns {google.cloud.dataproc.v1beta2.PrestoJob} PrestoJob
+         */
+        PrestoJob.fromObject = function fromObject(object) {
+            if (object instanceof $root.google.cloud.dataproc.v1beta2.PrestoJob)
+                return object;
+            var message = new $root.google.cloud.dataproc.v1beta2.PrestoJob();
+            if (object.queryFileUri != null)
+                message.queryFileUri = String(object.queryFileUri);
+            if (object.queryList != null) {
+                if (typeof object.queryList !== "object")
+                    throw TypeError(".google.cloud.dataproc.v1beta2.PrestoJob.queryList: object expected");
+                message.queryList = $root.google.cloud.dataproc.v1beta2.QueryList.fromObject(object.queryList);
+            }
+            if (object.continueOnFailure != null)
+                message.continueOnFailure = Boolean(object.continueOnFailure);
+            if (object.outputFormat != null)
+                message.outputFormat = String(object.outputFormat);
+            if (object.clientTags) {
+                if (!Array.isArray(object.clientTags))
+                    throw TypeError(".google.cloud.dataproc.v1beta2.PrestoJob.clientTags: array expected");
+                message.clientTags = [];
+                for (var i = 0; i < object.clientTags.length; ++i)
+                    message.clientTags[i] = String(object.clientTags[i]);
+            }
+            if (object.properties) {
+                if (typeof object.properties !== "object")
+                    throw TypeError(".google.cloud.dataproc.v1beta2.PrestoJob.properties: object expected");
+                message.properties = {};
+                for (var keys = Object.keys(object.properties), i = 0; i < keys.length; ++i)
+                    message.properties[keys[i]] = String(object.properties[keys[i]]);
+            }
+            if (object.loggingConfig != null) {
+                if (typeof object.loggingConfig !== "object")
+                    throw TypeError(".google.cloud.dataproc.v1beta2.PrestoJob.loggingConfig: object expected");
+                message.loggingConfig = $root.google.cloud.dataproc.v1beta2.LoggingConfig.fromObject(object.loggingConfig);
+            }
+            return message;
+        };
+
+        /**
+         * Creates a plain object from a PrestoJob message. Also converts values to other types if specified.
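+         *
+         * End-to-end sketch (hypothetical query and values) of the usual
+         * encode/decode round trip, using only the generated helpers on this
+         * type:
+         *
+         * @example
+         * var job = PrestoJob.fromObject({
+         *     queryList: { queries: ["SELECT * FROM pageviews LIMIT 10"] },
+         *     outputFormat: "csv",
+         *     clientTags: ["tag1"]
+         * });
+         * var decoded = PrestoJob.decode(PrestoJob.encode(job).finish());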
+         * @function toObject
+         * @memberof google.cloud.dataproc.v1beta2.PrestoJob
+         * @static
+         * @param {google.cloud.dataproc.v1beta2.PrestoJob} message PrestoJob
+         * @param {$protobuf.IConversionOptions} [options] Conversion options
+         * @returns {Object.<string,*>} Plain object
+         */
+        PrestoJob.toObject = function toObject(message, options) {
+            if (!options)
+                options = {};
+            var object = {};
+            if (options.arrays || options.defaults)
+                object.clientTags = [];
+            if (options.objects || options.defaults)
+                object.properties = {};
+            if (options.defaults) {
+                object.continueOnFailure = false;
+                object.outputFormat = "";
+                object.loggingConfig = null;
+            }
+            if (message.queryFileUri != null && message.hasOwnProperty("queryFileUri")) {
+                object.queryFileUri = message.queryFileUri;
+                if (options.oneofs)
+                    object.queries = "queryFileUri";
+            }
+            if (message.queryList != null && message.hasOwnProperty("queryList")) {
+                object.queryList = $root.google.cloud.dataproc.v1beta2.QueryList.toObject(message.queryList, options);
+                if (options.oneofs)
+                    object.queries = "queryList";
+            }
+            if (message.continueOnFailure != null && message.hasOwnProperty("continueOnFailure"))
+                object.continueOnFailure = message.continueOnFailure;
+            if (message.outputFormat != null && message.hasOwnProperty("outputFormat"))
+                object.outputFormat = message.outputFormat;
+            if (message.clientTags && message.clientTags.length) {
+                object.clientTags = [];
+                for (var j = 0; j < message.clientTags.length; ++j)
+                    object.clientTags[j] = message.clientTags[j];
+            }
+            var keys2;
+            if (message.properties && (keys2 = Object.keys(message.properties)).length) {
+                object.properties = {};
+                for (var j = 0; j < keys2.length; ++j)
+                    object.properties[keys2[j]] = message.properties[keys2[j]];
+            }
+            if (message.loggingConfig != null && message.hasOwnProperty("loggingConfig"))
+                object.loggingConfig = $root.google.cloud.dataproc.v1beta2.LoggingConfig.toObject(message.loggingConfig, options);
+            return object;
+        };
+
+        /**
+         * Converts this PrestoJob to JSON.
+         * @function toJSON
+         * @memberof google.cloud.dataproc.v1beta2.PrestoJob
+         * @instance
+         * @returns {Object.<string,*>} JSON object
+         */
+        PrestoJob.prototype.toJSON = function toJSON() {
+            return this.constructor.toObject(this, $protobuf.util.toJSONOptions);
+        };
+
+        return PrestoJob;
+    })();
+
     v1beta2.JobPlacement = (function() {

         /**
@@ -39677,6 +40984,7 @@
          * @property {google.cloud.dataproc.v1beta2.IPigJob|null} [pigJob] Job pigJob
          * @property {google.cloud.dataproc.v1beta2.ISparkRJob|null} [sparkRJob] Job sparkRJob
          * @property {google.cloud.dataproc.v1beta2.ISparkSqlJob|null} [sparkSqlJob] Job sparkSqlJob
+         * @property {google.cloud.dataproc.v1beta2.IPrestoJob|null} [prestoJob] Job prestoJob
          * @property {google.cloud.dataproc.v1beta2.IJobStatus|null} [status] Job status
          * @property {Array.<google.cloud.dataproc.v1beta2.IJobStatus>|null} [statusHistory] Job statusHistory
          * @property {Array.<google.cloud.dataproc.v1beta2.IYarnApplication>|null} [yarnApplications] Job yarnApplications
@@ -39686,6 +40994,7 @@
          * @property {Object.<string,string>|null} [labels] Job labels
          * @property {google.cloud.dataproc.v1beta2.IJobScheduling|null} [scheduling] Job scheduling
          * @property {string|null} [jobUuid] Job jobUuid
+         * @property {boolean|null} [done] Job done
          */

         /**
@@ -39778,6 +41087,14 @@
          */
         Job.prototype.sparkSqlJob = null;

+        /**
+         * Job prestoJob.
+         * @member {google.cloud.dataproc.v1beta2.IPrestoJob|null|undefined} prestoJob
+         * @memberof google.cloud.dataproc.v1beta2.Job
+         * @instance
+         */
+        Job.prototype.prestoJob = null;
+
         /**
          * Job status.
* @member {google.cloud.dataproc.v1beta2.IJobStatus|null|undefined} status @@ -39850,17 +41167,25 @@ */ Job.prototype.jobUuid = ""; + /** + * Job done. + * @member {boolean} done + * @memberof google.cloud.dataproc.v1beta2.Job + * @instance + */ + Job.prototype.done = false; + // OneOf field names bound to virtual getters and setters var $oneOfFields; /** * Job typeJob. - * @member {"hadoopJob"|"sparkJob"|"pysparkJob"|"hiveJob"|"pigJob"|"sparkRJob"|"sparkSqlJob"|undefined} typeJob + * @member {"hadoopJob"|"sparkJob"|"pysparkJob"|"hiveJob"|"pigJob"|"sparkRJob"|"sparkSqlJob"|"prestoJob"|undefined} typeJob * @memberof google.cloud.dataproc.v1beta2.Job * @instance */ Object.defineProperty(Job.prototype, "typeJob", { - get: $util.oneOfGetter($oneOfFields = ["hadoopJob", "sparkJob", "pysparkJob", "hiveJob", "pigJob", "sparkRJob", "sparkSqlJob"]), + get: $util.oneOfGetter($oneOfFields = ["hadoopJob", "sparkJob", "pysparkJob", "hiveJob", "pigJob", "sparkRJob", "sparkSqlJob", "prestoJob"]), set: $util.oneOfSetter($oneOfFields) }); @@ -39927,6 +41252,10 @@ $root.google.cloud.dataproc.v1beta2.SparkRJob.encode(message.sparkRJob, writer.uint32(/* id 21, wireType 2 =*/170).fork()).ldelim(); if (message.jobUuid != null && message.hasOwnProperty("jobUuid")) writer.uint32(/* id 22, wireType 2 =*/178).string(message.jobUuid); + if (message.prestoJob != null && message.hasOwnProperty("prestoJob")) + $root.google.cloud.dataproc.v1beta2.PrestoJob.encode(message.prestoJob, writer.uint32(/* id 23, wireType 2 =*/186).fork()).ldelim(); + if (message.done != null && message.hasOwnProperty("done")) + writer.uint32(/* id 24, wireType 0 =*/192).bool(message.done); return writer; }; @@ -39988,6 +41317,9 @@ case 12: message.sparkSqlJob = $root.google.cloud.dataproc.v1beta2.SparkSqlJob.decode(reader, reader.uint32()); break; + case 23: + message.prestoJob = $root.google.cloud.dataproc.v1beta2.PrestoJob.decode(reader, reader.uint32()); + break; case 8: message.status = $root.google.cloud.dataproc.v1beta2.JobStatus.decode(reader, reader.uint32()); break; @@ -40024,6 +41356,9 @@ case 22: message.jobUuid = reader.string(); break; + case 24: + message.done = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -40138,6 +41473,16 @@ return "sparkSqlJob." + error; } } + if (message.prestoJob != null && message.hasOwnProperty("prestoJob")) { + if (properties.typeJob === 1) + return "typeJob: multiple values"; + properties.typeJob = 1; + { + var error = $root.google.cloud.dataproc.v1beta2.PrestoJob.verify(message.prestoJob); + if (error) + return "prestoJob." 
+ error; + } + } if (message.status != null && message.hasOwnProperty("status")) { var error = $root.google.cloud.dataproc.v1beta2.JobStatus.verify(message.status); if (error) @@ -40186,6 +41531,9 @@ if (message.jobUuid != null && message.hasOwnProperty("jobUuid")) if (!$util.isString(message.jobUuid)) return "jobUuid: string expected"; + if (message.done != null && message.hasOwnProperty("done")) + if (typeof message.done !== "boolean") + return "done: boolean expected"; return null; }; @@ -40246,6 +41594,11 @@ throw TypeError(".google.cloud.dataproc.v1beta2.Job.sparkSqlJob: object expected"); message.sparkSqlJob = $root.google.cloud.dataproc.v1beta2.SparkSqlJob.fromObject(object.sparkSqlJob); } + if (object.prestoJob != null) { + if (typeof object.prestoJob !== "object") + throw TypeError(".google.cloud.dataproc.v1beta2.Job.prestoJob: object expected"); + message.prestoJob = $root.google.cloud.dataproc.v1beta2.PrestoJob.fromObject(object.prestoJob); + } if (object.status != null) { if (typeof object.status !== "object") throw TypeError(".google.cloud.dataproc.v1beta2.Job.status: object expected"); @@ -40291,6 +41644,8 @@ } if (object.jobUuid != null) message.jobUuid = String(object.jobUuid); + if (object.done != null) + message.done = Boolean(object.done); return message; }; @@ -40322,6 +41677,7 @@ object.driverOutputResourceUri = ""; object.scheduling = null; object.jobUuid = ""; + object.done = false; } if (message.reference != null && message.hasOwnProperty("reference")) object.reference = $root.google.cloud.dataproc.v1beta2.JobReference.toObject(message.reference, options); @@ -40390,6 +41746,13 @@ } if (message.jobUuid != null && message.hasOwnProperty("jobUuid")) object.jobUuid = message.jobUuid; + if (message.prestoJob != null && message.hasOwnProperty("prestoJob")) { + object.prestoJob = $root.google.cloud.dataproc.v1beta2.PrestoJob.toObject(message.prestoJob, options); + if (options.oneofs) + object.typeJob = "prestoJob"; + } + if (message.done != null && message.hasOwnProperty("done")) + object.done = message.done; return object; }; @@ -40594,6 +41957,270 @@ return JobScheduling; })(); + v1beta2.JobMetadata = (function() { + + /** + * Properties of a JobMetadata. + * @memberof google.cloud.dataproc.v1beta2 + * @interface IJobMetadata + * @property {string|null} [jobId] JobMetadata jobId + * @property {google.cloud.dataproc.v1beta2.IJobStatus|null} [status] JobMetadata status + * @property {string|null} [operationType] JobMetadata operationType + * @property {google.protobuf.ITimestamp|null} [startTime] JobMetadata startTime + */ + + /** + * Constructs a new JobMetadata. + * @memberof google.cloud.dataproc.v1beta2 + * @classdesc Represents a JobMetadata. + * @implements IJobMetadata + * @constructor + * @param {google.cloud.dataproc.v1beta2.IJobMetadata=} [properties] Properties to set + */ + function JobMetadata(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * JobMetadata jobId. + * @member {string} jobId + * @memberof google.cloud.dataproc.v1beta2.JobMetadata + * @instance + */ + JobMetadata.prototype.jobId = ""; + + /** + * JobMetadata status. + * @member {google.cloud.dataproc.v1beta2.IJobStatus|null|undefined} status + * @memberof google.cloud.dataproc.v1beta2.JobMetadata + * @instance + */ + JobMetadata.prototype.status = null; + + /** + * JobMetadata operationType. 
+ * @member {string} operationType + * @memberof google.cloud.dataproc.v1beta2.JobMetadata + * @instance + */ + JobMetadata.prototype.operationType = ""; + + /** + * JobMetadata startTime. + * @member {google.protobuf.ITimestamp|null|undefined} startTime + * @memberof google.cloud.dataproc.v1beta2.JobMetadata + * @instance + */ + JobMetadata.prototype.startTime = null; + + /** + * Creates a new JobMetadata instance using the specified properties. + * @function create + * @memberof google.cloud.dataproc.v1beta2.JobMetadata + * @static + * @param {google.cloud.dataproc.v1beta2.IJobMetadata=} [properties] Properties to set + * @returns {google.cloud.dataproc.v1beta2.JobMetadata} JobMetadata instance + */ + JobMetadata.create = function create(properties) { + return new JobMetadata(properties); + }; + + /** + * Encodes the specified JobMetadata message. Does not implicitly {@link google.cloud.dataproc.v1beta2.JobMetadata.verify|verify} messages. + * @function encode + * @memberof google.cloud.dataproc.v1beta2.JobMetadata + * @static + * @param {google.cloud.dataproc.v1beta2.IJobMetadata} message JobMetadata message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + JobMetadata.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.jobId != null && message.hasOwnProperty("jobId")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.jobId); + if (message.status != null && message.hasOwnProperty("status")) + $root.google.cloud.dataproc.v1beta2.JobStatus.encode(message.status, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.operationType != null && message.hasOwnProperty("operationType")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.operationType); + if (message.startTime != null && message.hasOwnProperty("startTime")) + $root.google.protobuf.Timestamp.encode(message.startTime, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified JobMetadata message, length delimited. Does not implicitly {@link google.cloud.dataproc.v1beta2.JobMetadata.verify|verify} messages. + * @function encodeDelimited + * @memberof google.cloud.dataproc.v1beta2.JobMetadata + * @static + * @param {google.cloud.dataproc.v1beta2.IJobMetadata} message JobMetadata message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + JobMetadata.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a JobMetadata message from the specified reader or buffer. + * @function decode + * @memberof google.cloud.dataproc.v1beta2.JobMetadata + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.dataproc.v1beta2.JobMetadata} JobMetadata + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + JobMetadata.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.dataproc.v1beta2.JobMetadata();
+            while (reader.pos < end) {
+                var tag = reader.uint32();
+                switch (tag >>> 3) {
+                case 1:
+                    message.jobId = reader.string();
+                    break;
+                case 2:
+                    message.status = $root.google.cloud.dataproc.v1beta2.JobStatus.decode(reader, reader.uint32());
+                    break;
+                case 3:
+                    message.operationType = reader.string();
+                    break;
+                case 4:
+                    message.startTime = $root.google.protobuf.Timestamp.decode(reader, reader.uint32());
+                    break;
+                default:
+                    reader.skipType(tag & 7);
+                    break;
+                }
+            }
+            return message;
+        };
+
+        /**
+         * Decodes a JobMetadata message from the specified reader or buffer, length delimited.
+         * @function decodeDelimited
+         * @memberof google.cloud.dataproc.v1beta2.JobMetadata
+         * @static
+         * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+         * @returns {google.cloud.dataproc.v1beta2.JobMetadata} JobMetadata
+         * @throws {Error} If the payload is not a reader or valid buffer
+         * @throws {$protobuf.util.ProtocolError} If required fields are missing
+         */
+        JobMetadata.decodeDelimited = function decodeDelimited(reader) {
+            if (!(reader instanceof $Reader))
+                reader = new $Reader(reader);
+            return this.decode(reader, reader.uint32());
+        };
+
+        /**
+         * Verifies a JobMetadata message.
+         * @function verify
+         * @memberof google.cloud.dataproc.v1beta2.JobMetadata
+         * @static
+         * @param {Object.<string,*>} message Plain object to verify
+         * @returns {string|null} `null` if valid, otherwise the reason why it is not
+         */
+        JobMetadata.verify = function verify(message) {
+            if (typeof message !== "object" || message === null)
+                return "object expected";
+            if (message.jobId != null && message.hasOwnProperty("jobId"))
+                if (!$util.isString(message.jobId))
+                    return "jobId: string expected";
+            if (message.status != null && message.hasOwnProperty("status")) {
+                var error = $root.google.cloud.dataproc.v1beta2.JobStatus.verify(message.status);
+                if (error)
+                    return "status." + error;
+            }
+            if (message.operationType != null && message.hasOwnProperty("operationType"))
+                if (!$util.isString(message.operationType))
+                    return "operationType: string expected";
+            if (message.startTime != null && message.hasOwnProperty("startTime")) {
+                var error = $root.google.protobuf.Timestamp.verify(message.startTime);
+                if (error)
+                    return "startTime." + error;
+            }
+            return null;
+        };
+
+        /**
+         * Creates a JobMetadata message from a plain object. Also converts values to their respective internal types.
+         * @function fromObject
+         * @memberof google.cloud.dataproc.v1beta2.JobMetadata
+         * @static
+         * @param {Object.<string,*>} object Plain object
+         * @returns {google.cloud.dataproc.v1beta2.JobMetadata} JobMetadata
+         */
+        JobMetadata.fromObject = function fromObject(object) {
+            if (object instanceof $root.google.cloud.dataproc.v1beta2.JobMetadata)
+                return object;
+            var message = new $root.google.cloud.dataproc.v1beta2.JobMetadata();
+            if (object.jobId != null)
+                message.jobId = String(object.jobId);
+            if (object.status != null) {
+                if (typeof object.status !== "object")
+                    throw TypeError(".google.cloud.dataproc.v1beta2.JobMetadata.status: object expected");
+                message.status = $root.google.cloud.dataproc.v1beta2.JobStatus.fromObject(object.status);
+            }
+            if (object.operationType != null)
+                message.operationType = String(object.operationType);
+            if (object.startTime != null) {
+                if (typeof object.startTime !== "object")
+                    throw TypeError(".google.cloud.dataproc.v1beta2.JobMetadata.startTime: object expected");
+                message.startTime = $root.google.protobuf.Timestamp.fromObject(object.startTime);
+            }
+            return message;
+        };
+
+        /**
+         * Creates a plain object from a JobMetadata message. Also converts values to other types if specified.
+         * @function toObject
+         * @memberof google.cloud.dataproc.v1beta2.JobMetadata
+         * @static
+         * @param {google.cloud.dataproc.v1beta2.JobMetadata} message JobMetadata
+         * @param {$protobuf.IConversionOptions} [options] Conversion options
+         * @returns {Object.<string,*>} Plain object
+         */
+        JobMetadata.toObject = function toObject(message, options) {
+            if (!options)
+                options = {};
+            var object = {};
+            if (options.defaults) {
+                object.jobId = "";
+                object.status = null;
+                object.operationType = "";
+                object.startTime = null;
+            }
+            if (message.jobId != null && message.hasOwnProperty("jobId"))
+                object.jobId = message.jobId;
+            if (message.status != null && message.hasOwnProperty("status"))
+                object.status = $root.google.cloud.dataproc.v1beta2.JobStatus.toObject(message.status, options);
+            if (message.operationType != null && message.hasOwnProperty("operationType"))
+                object.operationType = message.operationType;
+            if (message.startTime != null && message.hasOwnProperty("startTime"))
+                object.startTime = $root.google.protobuf.Timestamp.toObject(message.startTime, options);
+            return object;
+        };
+
+        /**
+         * Converts this JobMetadata to JSON.
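+         *
+         * JobMetadata is declared in protos.json as the metadata_type of the
+         * JobController.SubmitJobAsOperation long-running operation. A hedged
+         * sketch, assuming `op` is a decoded google.longrunning.Operation
+         * message whose metadata is a packed Any:
+         *
+         * @example
+         * var meta = JobMetadata.decode(op.metadata.value);
+         * console.log(meta.jobId, meta.operationType, meta.toJSON());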
+         * @function toJSON
+         * @memberof google.cloud.dataproc.v1beta2.JobMetadata
+         * @instance
+         * @returns {Object.<string,*>} JSON object
+         */
+        JobMetadata.prototype.toJSON = function toJSON() {
+            return this.constructor.toObject(this, $protobuf.util.toJSONOptions);
+        };
+
+        return JobMetadata;
+    })();
+
     v1beta2.SubmitJobRequest = (function() {

         /**
@@ -44586,7 +46213,9 @@
          * @property {google.cloud.dataproc.v1beta2.IPySparkJob|null} [pysparkJob] OrderedJob pysparkJob
          * @property {google.cloud.dataproc.v1beta2.IHiveJob|null} [hiveJob] OrderedJob hiveJob
          * @property {google.cloud.dataproc.v1beta2.IPigJob|null} [pigJob] OrderedJob pigJob
+         * @property {google.cloud.dataproc.v1beta2.ISparkRJob|null} [sparkRJob] OrderedJob sparkRJob
          * @property {google.cloud.dataproc.v1beta2.ISparkSqlJob|null} [sparkSqlJob] OrderedJob sparkSqlJob
+         * @property {google.cloud.dataproc.v1beta2.IPrestoJob|null} [prestoJob] OrderedJob prestoJob
          * @property {Object.<string,string>|null} [labels] OrderedJob labels
          * @property {google.cloud.dataproc.v1beta2.IJobScheduling|null} [scheduling] OrderedJob scheduling
          * @property {Array.<string>|null} [prerequisiteStepIds] OrderedJob prerequisiteStepIds
@@ -44657,6 +46286,14 @@
          */
         OrderedJob.prototype.pigJob = null;

+        /**
+         * OrderedJob sparkRJob.
+         * @member {google.cloud.dataproc.v1beta2.ISparkRJob|null|undefined} sparkRJob
+         * @memberof google.cloud.dataproc.v1beta2.OrderedJob
+         * @instance
+         */
+        OrderedJob.prototype.sparkRJob = null;
+
         /**
          * OrderedJob sparkSqlJob.
          * @member {google.cloud.dataproc.v1beta2.ISparkSqlJob|null|undefined} sparkSqlJob
@@ -44665,6 +46302,14 @@
          */
         OrderedJob.prototype.sparkSqlJob = null;

+        /**
+         * OrderedJob prestoJob.
+         * @member {google.cloud.dataproc.v1beta2.IPrestoJob|null|undefined} prestoJob
+         * @memberof google.cloud.dataproc.v1beta2.OrderedJob
+         * @instance
+         */
+        OrderedJob.prototype.prestoJob = null;
+
         /**
          * OrderedJob labels.
          * @member {Object.<string,string>} labels
@@ -44694,12 +46339,12 @@

         /**
          * OrderedJob jobType.
- * @member {"hadoopJob"|"sparkJob"|"pysparkJob"|"hiveJob"|"pigJob"|"sparkSqlJob"|undefined} jobType + * @member {"hadoopJob"|"sparkJob"|"pysparkJob"|"hiveJob"|"pigJob"|"sparkRJob"|"sparkSqlJob"|"prestoJob"|undefined} jobType * @memberof google.cloud.dataproc.v1beta2.OrderedJob * @instance */ Object.defineProperty(OrderedJob.prototype, "jobType", { - get: $util.oneOfGetter($oneOfFields = ["hadoopJob", "sparkJob", "pysparkJob", "hiveJob", "pigJob", "sparkSqlJob"]), + get: $util.oneOfGetter($oneOfFields = ["hadoopJob", "sparkJob", "pysparkJob", "hiveJob", "pigJob", "sparkRJob", "sparkSqlJob", "prestoJob"]), set: $util.oneOfSetter($oneOfFields) }); @@ -44749,6 +46394,10 @@ if (message.prerequisiteStepIds != null && message.prerequisiteStepIds.length) for (var i = 0; i < message.prerequisiteStepIds.length; ++i) writer.uint32(/* id 10, wireType 2 =*/82).string(message.prerequisiteStepIds[i]); + if (message.sparkRJob != null && message.hasOwnProperty("sparkRJob")) + $root.google.cloud.dataproc.v1beta2.SparkRJob.encode(message.sparkRJob, writer.uint32(/* id 11, wireType 2 =*/90).fork()).ldelim(); + if (message.prestoJob != null && message.hasOwnProperty("prestoJob")) + $root.google.cloud.dataproc.v1beta2.PrestoJob.encode(message.prestoJob, writer.uint32(/* id 12, wireType 2 =*/98).fork()).ldelim(); return writer; }; @@ -44801,9 +46450,15 @@ case 6: message.pigJob = $root.google.cloud.dataproc.v1beta2.PigJob.decode(reader, reader.uint32()); break; + case 11: + message.sparkRJob = $root.google.cloud.dataproc.v1beta2.SparkRJob.decode(reader, reader.uint32()); + break; case 7: message.sparkSqlJob = $root.google.cloud.dataproc.v1beta2.SparkSqlJob.decode(reader, reader.uint32()); break; + case 12: + message.prestoJob = $root.google.cloud.dataproc.v1beta2.PrestoJob.decode(reader, reader.uint32()); + break; case 8: reader.skip().pos++; if (message.labels === $util.emptyObject) @@ -44907,6 +46562,16 @@ return "pigJob." + error; } } + if (message.sparkRJob != null && message.hasOwnProperty("sparkRJob")) { + if (properties.jobType === 1) + return "jobType: multiple values"; + properties.jobType = 1; + { + var error = $root.google.cloud.dataproc.v1beta2.SparkRJob.verify(message.sparkRJob); + if (error) + return "sparkRJob." + error; + } + } if (message.sparkSqlJob != null && message.hasOwnProperty("sparkSqlJob")) { if (properties.jobType === 1) return "jobType: multiple values"; @@ -44917,6 +46582,16 @@ return "sparkSqlJob." + error; } } + if (message.prestoJob != null && message.hasOwnProperty("prestoJob")) { + if (properties.jobType === 1) + return "jobType: multiple values"; + properties.jobType = 1; + { + var error = $root.google.cloud.dataproc.v1beta2.PrestoJob.verify(message.prestoJob); + if (error) + return "prestoJob." 
+ error; + } + } if (message.labels != null && message.hasOwnProperty("labels")) { if (!$util.isObject(message.labels)) return "labels: object expected"; @@ -44979,11 +46654,21 @@ throw TypeError(".google.cloud.dataproc.v1beta2.OrderedJob.pigJob: object expected"); message.pigJob = $root.google.cloud.dataproc.v1beta2.PigJob.fromObject(object.pigJob); } + if (object.sparkRJob != null) { + if (typeof object.sparkRJob !== "object") + throw TypeError(".google.cloud.dataproc.v1beta2.OrderedJob.sparkRJob: object expected"); + message.sparkRJob = $root.google.cloud.dataproc.v1beta2.SparkRJob.fromObject(object.sparkRJob); + } if (object.sparkSqlJob != null) { if (typeof object.sparkSqlJob !== "object") throw TypeError(".google.cloud.dataproc.v1beta2.OrderedJob.sparkSqlJob: object expected"); message.sparkSqlJob = $root.google.cloud.dataproc.v1beta2.SparkSqlJob.fromObject(object.sparkSqlJob); } + if (object.prestoJob != null) { + if (typeof object.prestoJob !== "object") + throw TypeError(".google.cloud.dataproc.v1beta2.OrderedJob.prestoJob: object expected"); + message.prestoJob = $root.google.cloud.dataproc.v1beta2.PrestoJob.fromObject(object.prestoJob); + } if (object.labels) { if (typeof object.labels !== "object") throw TypeError(".google.cloud.dataproc.v1beta2.OrderedJob.labels: object expected"); @@ -45072,6 +46757,16 @@ for (var j = 0; j < message.prerequisiteStepIds.length; ++j) object.prerequisiteStepIds[j] = message.prerequisiteStepIds[j]; } + if (message.sparkRJob != null && message.hasOwnProperty("sparkRJob")) { + object.sparkRJob = $root.google.cloud.dataproc.v1beta2.SparkRJob.toObject(message.sparkRJob, options); + if (options.oneofs) + object.jobType = "sparkRJob"; + } + if (message.prestoJob != null && message.hasOwnProperty("prestoJob")) { + object.prestoJob = $root.google.cloud.dataproc.v1beta2.PrestoJob.toObject(message.prestoJob, options); + if (options.oneofs) + object.jobType = "prestoJob"; + } return object; }; diff --git a/packages/google-cloud-dataproc/protos/protos.json b/packages/google-cloud-dataproc/protos/protos.json index f716a877783..7cd7deba63a 100644 --- a/packages/google-cloud-dataproc/protos/protos.json +++ b/packages/google-cloud-dataproc/protos/protos.json @@ -423,7 +423,10 @@ }, "metrics": { "type": "ClusterMetrics", - "id": 9 + "id": 9, + "options": { + "(google.api.field_behavior)": "OUTPUT_ONLY" + } } } }, @@ -637,7 +640,7 @@ "type": "bool", "id": 6, "options": { - "(google.api.field_behavior)": "OPTIONAL" + "(google.api.field_behavior)": "OUTPUT_ONLY" } }, "managedGroupConfig": { @@ -1271,7 +1274,9 @@ "ANACONDA": 5, "HIVE_WEBHCAT": 3, "JUPYTER": 1, - "ZEPPELIN": 4 + "PRESTO": 6, + "ZEPPELIN": 4, + "ZOOKEEPER": 8 } }, "JobController": { @@ -1289,6 +1294,17 @@ "(google.api.method_signature)": "project_id,region,job" } }, + "SubmitJobAsOperation": { + "requestType": "SubmitJobRequest", + "responseType": "google.longrunning.Operation", + "options": { + "(google.api.http).post": "/v1/projects/{project_id}/regions/{region}/jobs:submitAsOperation", + "(google.api.http).body": "*", + "(google.api.method_signature)": "project_id, region, job", + "(google.longrunning.operation_info).response_type": "Job", + "(google.longrunning.operation_info).metadata_type": "JobMetadata" + } + }, "GetJob": { "requestType": "GetJobRequest", "responseType": "Job", @@ -2118,6 +2134,13 @@ "options": { "(google.api.field_behavior)": "OUTPUT_ONLY" } + }, + "done": { + "type": "bool", + "id": 24, + "options": { + "(google.api.field_behavior)": "OUTPUT_ONLY" + } } } }, @@ -2164,6 
+2187,38 @@ } } }, + "JobMetadata": { + "fields": { + "jobId": { + "type": "string", + "id": 1, + "options": { + "(google.api.field_behavior)": "OUTPUT_ONLY" + } + }, + "status": { + "type": "JobStatus", + "id": 2, + "options": { + "(google.api.field_behavior)": "OUTPUT_ONLY" + } + }, + "operationType": { + "type": "string", + "id": 3, + "options": { + "(google.api.field_behavior)": "OUTPUT_ONLY" + } + }, + "startTime": { + "type": "google.protobuf.Timestamp", + "id": 4, + "options": { + "(google.api.field_behavior)": "OUTPUT_ONLY" + } + } + } + }, "GetJobRequest": { "fields": { "projectId": { @@ -2696,7 +2751,9 @@ "pysparkJob", "hiveJob", "pigJob", - "sparkSqlJob" + "sparkRJob", + "sparkSqlJob", + "prestoJob" ] } }, @@ -2728,10 +2785,18 @@ "type": "PigJob", "id": 6 }, + "sparkRJob": { + "type": "SparkRJob", + "id": 11 + }, "sparkSqlJob": { "type": "SparkSqlJob", "id": 7 }, + "prestoJob": { + "type": "PrestoJob", + "id": 12 + }, "labels": { "keyType": "string", "type": "string", @@ -3698,6 +3763,45 @@ "options": { "(google.api.field_behavior)": "OPTIONAL" } + }, + "gkeClusterConfig": { + "type": "GkeClusterConfig", + "id": 19, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } + } + } + }, + "GkeClusterConfig": { + "fields": { + "namespacedGkeDeploymentTarget": { + "type": "NamespacedGkeDeploymentTarget", + "id": 1, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } + } + }, + "nested": { + "NamespacedGkeDeploymentTarget": { + "fields": { + "targetGkeCluster": { + "type": "string", + "id": 1, + "options": { + "(google.api.field_behavior)": "OPTIONAL", + "(google.api.resource_reference).type": "container.googleapis.com/Cluster" + } + }, + "clusterNamespace": { + "type": "string", + "id": 2, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } + } + } } } }, @@ -3848,7 +3952,7 @@ "type": "bool", "id": 6, "options": { - "(google.api.field_behavior)": "OPTIONAL" + "(google.api.field_behavior)": "OUTPUT_ONLY" } }, "managedGroupConfig": { @@ -4138,7 +4242,10 @@ "RUNNING": 2, "ERROR": 3, "DELETING": 4, - "UPDATING": 5 + "UPDATING": 5, + "STOPPING": 6, + "STOPPED": 7, + "STARTING": 8 } }, "Substate": { @@ -4471,11 +4578,15 @@ "values": { "COMPONENT_UNSPECIFIED": 0, "ANACONDA": 5, + "DOCKER": 13, "DRUID": 9, + "FLINK": 14, "HIVE_WEBHCAT": 3, "JUPYTER": 1, "KERBEROS": 7, "PRESTO": 6, + "RANGER": 12, + "SOLR": 10, "ZEPPELIN": 4, "ZOOKEEPER": 8 } @@ -4495,6 +4606,17 @@ "(google.api.method_signature)": "project_id, region, job" } }, + "SubmitJobAsOperation": { + "requestType": "SubmitJobRequest", + "responseType": "google.longrunning.Operation", + "options": { + "(google.api.http).post": "/v1beta2/projects/{project_id}/regions/{region}/jobs:submitAsOperation", + "(google.api.http).body": "*", + "(google.api.method_signature)": "project_id, region, job", + "(google.longrunning.operation_info).response_type": "Job", + "(google.longrunning.operation_info).metadata_type": "JobMetadata" + } + }, "GetJob": { "requestType": "GetJobRequest", "responseType": "Job", @@ -4984,6 +5106,63 @@ } } }, + "PrestoJob": { + "oneofs": { + "queries": { + "oneof": [ + "queryFileUri", + "queryList" + ] + } + }, + "fields": { + "queryFileUri": { + "type": "string", + "id": 1 + }, + "queryList": { + "type": "QueryList", + "id": 2 + }, + "continueOnFailure": { + "type": "bool", + "id": 3, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } + }, + "outputFormat": { + "type": "string", + "id": 4, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } + }, + "clientTags": { + 
"rule": "repeated", + "type": "string", + "id": 5, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } + }, + "properties": { + "keyType": "string", + "type": "string", + "id": 6, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } + }, + "loggingConfig": { + "type": "LoggingConfig", + "id": 7, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } + } + } + }, "JobPlacement": { "fields": { "clusterName": { @@ -5133,7 +5312,8 @@ "hiveJob", "pigJob", "sparkRJob", - "sparkSqlJob" + "sparkSqlJob", + "prestoJob" ] } }, @@ -5154,31 +5334,59 @@ }, "hadoopJob": { "type": "HadoopJob", - "id": 3 + "id": 3, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } }, "sparkJob": { "type": "SparkJob", - "id": 4 + "id": 4, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } }, "pysparkJob": { "type": "PySparkJob", - "id": 5 + "id": 5, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } }, "hiveJob": { "type": "HiveJob", - "id": 6 + "id": 6, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } }, "pigJob": { "type": "PigJob", - "id": 7 + "id": 7, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } }, "sparkRJob": { "type": "SparkRJob", - "id": 21 + "id": 21, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } }, "sparkSqlJob": { "type": "SparkSqlJob", - "id": 12 + "id": 12, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } + }, + "prestoJob": { + "type": "PrestoJob", + "id": 23, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } }, "status": { "type": "JobStatus", @@ -5245,6 +5453,13 @@ "options": { "(google.api.field_behavior)": "OUTPUT_ONLY" } + }, + "done": { + "type": "bool", + "id": 24, + "options": { + "(google.api.field_behavior)": "OUTPUT_ONLY" + } } } }, @@ -5259,6 +5474,38 @@ } } }, + "JobMetadata": { + "fields": { + "jobId": { + "type": "string", + "id": 1, + "options": { + "(google.api.field_behavior)": "OUTPUT_ONLY" + } + }, + "status": { + "type": "JobStatus", + "id": 2, + "options": { + "(google.api.field_behavior)": "OUTPUT_ONLY" + } + }, + "operationType": { + "type": "string", + "id": 3, + "options": { + "(google.api.field_behavior)": "OUTPUT_ONLY" + } + }, + "startTime": { + "type": "google.protobuf.Timestamp", + "id": 4, + "options": { + "(google.api.field_behavior)": "OUTPUT_ONLY" + } + } + } + }, "SubmitJobRequest": { "fields": { "projectId": { @@ -5766,14 +6013,19 @@ "pysparkJob", "hiveJob", "pigJob", - "sparkSqlJob" + "sparkRJob", + "sparkSqlJob", + "prestoJob" ] } }, "fields": { "stepId": { "type": "string", - "id": 1 + "id": 1, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } }, "hadoopJob": { "type": "HadoopJob", @@ -5795,23 +6047,40 @@ "type": "PigJob", "id": 6 }, + "sparkRJob": { + "type": "SparkRJob", + "id": 11 + }, "sparkSqlJob": { "type": "SparkSqlJob", "id": 7 }, + "prestoJob": { + "type": "PrestoJob", + "id": 12 + }, "labels": { "keyType": "string", "type": "string", - "id": 8 + "id": 8, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } }, "scheduling": { "type": "JobScheduling", - "id": 9 + "id": 9, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } }, "prerequisiteStepIds": { "rule": "repeated", "type": "string", - "id": 10 + "id": 10, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } } } }, diff --git a/packages/google-cloud-dataproc/src/v1/autoscaling_policy_service_client.ts b/packages/google-cloud-dataproc/src/v1/autoscaling_policy_service_client.ts index e2efa8998b1..6c9efbae331 100644 --- 
a/packages/google-cloud-dataproc/src/v1/autoscaling_policy_service_client.ts +++ b/packages/google-cloud-dataproc/src/v1/autoscaling_policy_service_client.ts @@ -366,7 +366,7 @@ export class AutoscalingPolicyServiceClient { * of the location has the following format: * `projects/{project_id}/locations/{location}` * @param {google.cloud.dataproc.v1.AutoscalingPolicy} request.policy - * The autoscaling policy to create. + * Required. The autoscaling policy to create. * @param {object} [options] * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. * @returns {Promise} - The promise which resolves to an array. diff --git a/packages/google-cloud-dataproc/src/v1/cluster_controller_client.ts b/packages/google-cloud-dataproc/src/v1/cluster_controller_client.ts index d6c5b7919a7..238fc8d53fa 100644 --- a/packages/google-cloud-dataproc/src/v1/cluster_controller_client.ts +++ b/packages/google-cloud-dataproc/src/v1/cluster_controller_client.ts @@ -985,7 +985,7 @@ export class ClusterControllerClient { > ): void; /** - * Lists all regions/{region}/clusters in a project. + * Lists all regions/{region}/clusters in a project alphabetically. * * @param {Object} request * The request object that will be sent. diff --git a/packages/google-cloud-dataproc/src/v1/job_controller_client.ts b/packages/google-cloud-dataproc/src/v1/job_controller_client.ts index 8c9708049f1..8db0cc5cc79 100644 --- a/packages/google-cloud-dataproc/src/v1/job_controller_client.ts +++ b/packages/google-cloud-dataproc/src/v1/job_controller_client.ts @@ -22,6 +22,7 @@ import { CallOptions, Descriptors, ClientOptions, + LROperation, PaginationCallback, GaxCall, } from 'google-gax'; @@ -55,6 +56,7 @@ export class JobControllerClient { }; innerApiCalls: {[name: string]: Function}; pathTemplates: {[name: string]: gax.PathTemplate}; + operationsClient: gax.OperationsClient; jobControllerStub?: Promise<{[name: string]: Function}>; /** @@ -181,6 +183,37 @@ export class JobControllerClient { ), }; + // This API contains "long-running operations", which return + // an Operation object that allows for tracking of the operation, + // rather than holding a request open. + const protoFilesRoot = opts.fallback + ? this._gaxModule.protobuf.Root.fromJSON( + // eslint-disable-next-line @typescript-eslint/no-var-requires + require('../../protos/protos.json') + ) + : this._gaxModule.protobuf.loadSync(nodejsProtoPath); + + this.operationsClient = this._gaxModule + .lro({ + auth: this.auth, + grpc: 'grpc' in this._gaxGrpc ? this._gaxGrpc.grpc : undefined, + }) + .operationsClient(opts); + const submitJobAsOperationResponse = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1.Job' + ) as gax.protobuf.Type; + const submitJobAsOperationMetadata = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1.JobMetadata' + ) as gax.protobuf.Type; + + this.descriptors.longrunning = { + submitJobAsOperation: new this._gaxModule.LongrunningDescriptor( + this.operationsClient, + submitJobAsOperationResponse.decode.bind(submitJobAsOperationResponse), + submitJobAsOperationMetadata.decode.bind(submitJobAsOperationMetadata) + ), + }; + // Put together the default options sent with requests. this._defaults = this._gaxGrpc.constructSettings( 'google.cloud.dataproc.v1.JobController', @@ -228,6 +261,7 @@ export class JobControllerClient { // and create an API call method for each.
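+ // A minimal usage sketch for the long-running surface wired up above
+ // (illustrative only; `request` stands in for a populated SubmitJobRequest):
+ //   const [operation] = await client.submitJobAsOperation(request);
+ //   const [job] = await operation.promise(); // resolves with the final Job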
const jobControllerStubMethods = [ 'submitJob', + 'submitJobAsOperation', 'getJob', 'listJobs', 'updateJob', @@ -729,6 +763,114 @@ export class JobControllerClient { return this.innerApiCalls.deleteJob(request, options, callback); } + submitJobAsOperation( + request: protos.google.cloud.dataproc.v1.ISubmitJobRequest, + options?: gax.CallOptions + ): Promise< + [ + LROperation< + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.IJobMetadata + >, + protos.google.longrunning.IOperation | undefined, + {} | undefined + ] + >; + submitJobAsOperation( + request: protos.google.cloud.dataproc.v1.ISubmitJobRequest, + options: gax.CallOptions, + callback: Callback< + LROperation< + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.IJobMetadata + >, + protos.google.longrunning.IOperation | null | undefined, + {} | null | undefined + > + ): void; + submitJobAsOperation( + request: protos.google.cloud.dataproc.v1.ISubmitJobRequest, + callback: Callback< + LROperation< + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.IJobMetadata + >, + protos.google.longrunning.IOperation | null | undefined, + {} | null | undefined + > + ): void; + /** + * Submits a job to a cluster. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. The ID of the Google Cloud Platform project that the job + * belongs to. + * @param {string} request.region + * Required. The Dataproc region in which to handle the request. + * @param {google.cloud.dataproc.v1.Job} request.job + * Required. The job resource. + * @param {string} [request.requestId] + * Optional. A unique id used to identify the request. If the server + * receives two {@link google.cloud.dataproc.v1.SubmitJobRequest|SubmitJobRequest} requests with the same + * id, then the second request will be ignored and the + * first {@link google.cloud.dataproc.v1.Job|Job} created and stored in the backend + * is returned. + * + * It is recommended to always set this value to a + * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + * + * The id must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [Operation]{@link google.longrunning.Operation}. + * The promise has a method named "cancel" which cancels the ongoing API call.
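+ * @example
+ * // Hedged sketch; the field values below are hypothetical placeholders:
+ * const [operation] = await client.submitJobAsOperation({
+ *   projectId: 'my-project',
+ *   region: 'us-central1',
+ *   job: {placement: {clusterName: 'my-cluster'}, sparkJob: {mainClass: 'org.example.Main'}},
+ * });
+ * const [job] = await operation.promise(); // the completed Job resource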
+ */ + submitJobAsOperation( + request: protos.google.cloud.dataproc.v1.ISubmitJobRequest, + optionsOrCallback?: + | gax.CallOptions + | Callback< + LROperation< + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.IJobMetadata + >, + protos.google.longrunning.IOperation | null | undefined, + {} | null | undefined + >, + callback?: Callback< + LROperation< + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.IJobMetadata + >, + protos.google.longrunning.IOperation | null | undefined, + {} | null | undefined + > + ): Promise< + [ + LROperation< + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.IJobMetadata + >, + protos.google.longrunning.IOperation | undefined, + {} | undefined + ] + > | void { + request = request || {}; + let options: gax.CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } else { + options = optionsOrCallback as gax.CallOptions; + } + options = options || {}; + this.initialize(); + return this.innerApiCalls.submitJobAsOperation(request, options, callback); + } listJobs( request: protos.google.cloud.dataproc.v1.IListJobsRequest, options?: gax.CallOptions diff --git a/packages/google-cloud-dataproc/src/v1/job_controller_client_config.json b/packages/google-cloud-dataproc/src/v1/job_controller_client_config.json index 9f37054a78e..5d75711034e 100644 --- a/packages/google-cloud-dataproc/src/v1/job_controller_client_config.json +++ b/packages/google-cloud-dataproc/src/v1/job_controller_client_config.json @@ -33,6 +33,11 @@ "retry_codes_name": "unavailable", "retry_params_name": "default" }, + "SubmitJobAsOperation": { + "timeout_millis": 900000, + "retry_codes_name": "unavailable", + "retry_params_name": "default" + }, "GetJob": { "timeout_millis": 900000, "retry_codes_name": "deadline_exceeded_internal_unavailable", diff --git a/packages/google-cloud-dataproc/src/v1beta2/cluster_controller_client.ts b/packages/google-cloud-dataproc/src/v1beta2/cluster_controller_client.ts index 3b84914cdad..123235e7fdc 100644 --- a/packages/google-cloud-dataproc/src/v1beta2/cluster_controller_client.ts +++ b/packages/google-cloud-dataproc/src/v1beta2/cluster_controller_client.ts @@ -1006,7 +1006,7 @@ export class ClusterControllerClient { > ): void; /** - * Lists all regions/{region}/clusters in a project. + * Lists all regions/{region}/clusters in a project alphabetically. * * @param {Object} request * The request object that will be sent. diff --git a/packages/google-cloud-dataproc/src/v1beta2/job_controller_client.ts b/packages/google-cloud-dataproc/src/v1beta2/job_controller_client.ts index 313b9e036ae..80e9a2aa76e 100644 --- a/packages/google-cloud-dataproc/src/v1beta2/job_controller_client.ts +++ b/packages/google-cloud-dataproc/src/v1beta2/job_controller_client.ts @@ -22,6 +22,7 @@ import { CallOptions, Descriptors, ClientOptions, + LROperation, PaginationCallback, GaxCall, } from 'google-gax'; @@ -55,6 +56,7 @@ export class JobControllerClient { }; innerApiCalls: {[name: string]: Function}; pathTemplates: {[name: string]: gax.PathTemplate}; + operationsClient: gax.OperationsClient; jobControllerStub?: Promise<{[name: string]: Function}>; /** @@ -181,6 +183,37 @@ export class JobControllerClient { ), }; + // This API contains "long-running operations", which return + // an Operation object that allows for tracking of the operation, + // rather than holding a request open. + const protoFilesRoot = opts.fallback + ?
this._gaxModule.protobuf.Root.fromJSON( + // eslint-disable-next-line @typescript-eslint/no-var-requires + require('../../protos/protos.json') + ) + : this._gaxModule.protobuf.loadSync(nodejsProtoPath); + + this.operationsClient = this._gaxModule + .lro({ + auth: this.auth, + grpc: 'grpc' in this._gaxGrpc ? this._gaxGrpc.grpc : undefined, + }) + .operationsClient(opts); + const submitJobAsOperationResponse = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1beta2.Job' + ) as gax.protobuf.Type; + const submitJobAsOperationMetadata = protoFilesRoot.lookup( + '.google.cloud.dataproc.v1beta2.JobMetadata' + ) as gax.protobuf.Type; + + this.descriptors.longrunning = { + submitJobAsOperation: new this._gaxModule.LongrunningDescriptor( + this.operationsClient, + submitJobAsOperationResponse.decode.bind(submitJobAsOperationResponse), + submitJobAsOperationMetadata.decode.bind(submitJobAsOperationMetadata) + ), + }; + // Put together the default options sent with requests. this._defaults = this._gaxGrpc.constructSettings( 'google.cloud.dataproc.v1beta2.JobController', @@ -228,6 +261,7 @@ export class JobControllerClient { // and create an API call method for each. const jobControllerStubMethods = [ 'submitJob', + 'submitJobAsOperation', 'getJob', 'listJobs', 'updateJob', @@ -739,6 +773,114 @@ export class JobControllerClient { return this.innerApiCalls.deleteJob(request, options, callback); } + submitJobAsOperation( + request: protos.google.cloud.dataproc.v1beta2.ISubmitJobRequest, + options?: gax.CallOptions + ): Promise< + [ + LROperation< + protos.google.cloud.dataproc.v1beta2.IJob, + protos.google.cloud.dataproc.v1beta2.IJobMetadata + >, + protos.google.longrunning.IOperation | undefined, + {} | undefined + ] + >; + submitJobAsOperation( + request: protos.google.cloud.dataproc.v1beta2.ISubmitJobRequest, + options: gax.CallOptions, + callback: Callback< + LROperation< + protos.google.cloud.dataproc.v1beta2.IJob, + protos.google.cloud.dataproc.v1beta2.IJobMetadata + >, + protos.google.longrunning.IOperation | null | undefined, + {} | null | undefined + > + ): void; + submitJobAsOperation( + request: protos.google.cloud.dataproc.v1beta2.ISubmitJobRequest, + callback: Callback< + LROperation< + protos.google.cloud.dataproc.v1beta2.IJob, + protos.google.cloud.dataproc.v1beta2.IJobMetadata + >, + protos.google.longrunning.IOperation | null | undefined, + {} | null | undefined + > + ): void; + /** + * Submits a job to a cluster. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. The ID of the Google Cloud Platform project that the job + * belongs to. + * @param {string} request.region + * Required. The Dataproc region in which to handle the request. + * @param {google.cloud.dataproc.v1beta2.Job} request.job + * Required. The job resource. + * @param {string} [request.requestId] + * Optional. A unique id used to identify the request. If the server + * receives two {@link google.cloud.dataproc.v1beta2.SubmitJobRequest|SubmitJobRequest} requests with the same + * id, then the second request will be ignored and the + * first {@link google.cloud.dataproc.v1beta2.Job|Job} created and stored in the backend + * is returned. + * + * It is recommended to always set this value to a + * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + * + * The id must contain only letters (a-z, A-Z), numbers (0-9), + * underscores (_), and hyphens (-). The maximum length is 40 characters. + * @param {object} [options] + * Call options.
See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [Operation]{@link google.longrunning.Operation}. + * The promise has a method named "cancel" which cancels the ongoing API call. + */ + submitJobAsOperation( + request: protos.google.cloud.dataproc.v1beta2.ISubmitJobRequest, + optionsOrCallback?: + | gax.CallOptions + | Callback< + LROperation< + protos.google.cloud.dataproc.v1beta2.IJob, + protos.google.cloud.dataproc.v1beta2.IJobMetadata + >, + protos.google.longrunning.IOperation | null | undefined, + {} | null | undefined + >, + callback?: Callback< + LROperation< + protos.google.cloud.dataproc.v1beta2.IJob, + protos.google.cloud.dataproc.v1beta2.IJobMetadata + >, + protos.google.longrunning.IOperation | null | undefined, + {} | null | undefined + > + ): Promise< + [ + LROperation< + protos.google.cloud.dataproc.v1beta2.IJob, + protos.google.cloud.dataproc.v1beta2.IJobMetadata + >, + protos.google.longrunning.IOperation | undefined, + {} | undefined + ] + > | void { + request = request || {}; + let options: gax.CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } else { + options = optionsOrCallback as gax.CallOptions; + } + options = options || {}; + this.initialize(); + return this.innerApiCalls.submitJobAsOperation(request, options, callback); + } listJobs( request: protos.google.cloud.dataproc.v1beta2.IListJobsRequest, options?: gax.CallOptions diff --git a/packages/google-cloud-dataproc/src/v1beta2/job_controller_client_config.json b/packages/google-cloud-dataproc/src/v1beta2/job_controller_client_config.json index 58341b85e5d..10a6d8cee63 100644 --- a/packages/google-cloud-dataproc/src/v1beta2/job_controller_client_config.json +++ b/packages/google-cloud-dataproc/src/v1beta2/job_controller_client_config.json @@ -33,6 +33,11 @@ "retry_codes_name": "unavailable", "retry_params_name": "default" }, + "SubmitJobAsOperation": { + "timeout_millis": 900000, + "retry_codes_name": "unavailable", + "retry_params_name": "default" + }, "GetJob": { "timeout_millis": 900000, "retry_codes_name": "deadline_exceeded_internal_unavailable", diff --git a/packages/google-cloud-dataproc/src/v1beta2/workflow_template_service_client.ts b/packages/google-cloud-dataproc/src/v1beta2/workflow_template_service_client.ts index 81b68898241..569277d1f94 100644 --- a/packages/google-cloud-dataproc/src/v1beta2/workflow_template_service_client.ts +++ b/packages/google-cloud-dataproc/src/v1beta2/workflow_template_service_client.ts @@ -997,8 +997,7 @@ export class WorkflowTemplateServiceClient { * Instantiates a template and begins execution. * * This method is equivalent to executing the sequence - * {@link google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate|CreateWorkflowTemplate}, - * {@link google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate|InstantiateWorkflowTemplate}, + * {@link google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate|CreateWorkflowTemplate}, {@link google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate|InstantiateWorkflowTemplate}, * {@link google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate|DeleteWorkflowTemplate}. 
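+ * (A hypothetical client-side sketch of that sequence, with placeholder
+ * identifiers:
+ *   const [tpl] = await client.createWorkflowTemplate({parent, template});
+ *   const [op] = await client.instantiateWorkflowTemplate({name: tpl.name});
+ *   await client.deleteWorkflowTemplate({name: tpl.name});
+ * whereas this method performs the whole sequence in a single call.)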
* * The returned Operation can be used to track execution of diff --git a/packages/google-cloud-dataproc/synth.metadata b/packages/google-cloud-dataproc/synth.metadata index 66638e465ce..dfd8dfc5523 100644 --- a/packages/google-cloud-dataproc/synth.metadata +++ b/packages/google-cloud-dataproc/synth.metadata @@ -1,12 +1,26 @@ { - "updateTime": "2020-04-11T00:10:38.811960Z", "sources": [ + { + "git": { + "name": ".", + "remote": "https://github.com/googleapis/nodejs-dataproc.git", + "sha": "2eb5c2bfa756acf779ae0df1af1b6a6eb6cf740f" + } + }, + { + "git": { + "name": "googleapis", + "remote": "https://github.com/googleapis/googleapis.git", + "sha": "26523a96798ce1a6caa1b3c912119059cfcc98a7", + "internalRef": "306320014" + } + }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "6f32150677c9784f3c3a7e1949472bd29c9d72c5", - "log": "6f32150677c9784f3c3a7e1949472bd29c9d72c5\nfix: installs test_utils from its common repo (#480)\n\n\n74ce986d3b5431eb66985e9a00c4eb45295a4020\nfix: stop recording update_time in synth.metadata (#478)\n\n\n7f8e62aa3edd225f76347a16f92e400661fdfb52\nchore(java): release-please only updates non maven versions in README (#476)\n\nPrevent release-please and synthtool from fighting over the released library version. Synthtool updates the install snippets from the samples pom.xml files so the bots fight if they are temporarily out of sync after a release.\nc7e0e517d7f46f77bebd27da2e5afcaa6eee7e25\nbuild(java): fix nightly integration test config to run integrations (#465)\n\nThis was only running the units.\nbd69a2aa7b70875f3c988e269706b22fefbef40e\nbuild(java): fix retry_with_backoff when -e option set (#475)\n\n\nd9b173c427bfa0c6cca818233562e7e8841a357c\nfix: record version of working repo in synth.metadata (#473)\n\nPartial revert of b37cf74d12e9a42b9de9e61a4f26133d7cd9c168.\nf73a541770d95a609e5be6bf6b3b220d17cefcbe\nfeat(discogapic): allow local discovery-artifact-manager (#474)\n\n\n8cf0f5d93a70c3dcb0b4999d3152c46d4d9264bf\ndoc: describe the Autosynth & Synthtool protocol (#472)\n\n* doc: describe the Autosynth & Synthtool protocol\n\n* Accommodate review comments.\n980baaa738a1ad8fa02b4fdbd56be075ee77ece5\nfix: pin sphinx to <3.0.0 as new version causes new error (#471)\n\nThe error `toctree contains reference to document changlelog that doesn't have a title: no link will be generated` occurs as of 3.0.0. 
Pinning to 2.x until we address the docs build issue.\n\nTowards #470\n\nI did this manually for python-datastore https://github.com/googleapis/python-datastore/pull/22\n928b2998ac5023e7c7e254ab935f9ef022455aad\nchore(deps): update dependency com.google.cloud.samples:shared-configuration to v1.0.15 (#466)\n\nCo-authored-by: Jeffrey Rennie \n188f1b1d53181f739b98f8aa5d40cfe99eb90c47\nfix: allow local and external deps to be specified (#469)\n\nModify noxfile.py to allow local and external dependencies for\nsystem tests to be specified.\n1df68ed6735ddce6797d0f83641a731c3c3f75b4\nfix: apache license URL (#468)\n\n\nf4a59efa54808c4b958263de87bc666ce41e415f\nfeat: Add discogapic support for GAPICBazel generation (#459)\n\n* feat: Add discogapic support for GAPICBazel generation\n\n* reformat with black\n\n* Rename source repository variable\n\nCo-authored-by: Jeffrey Rennie \n" + "sha": "52638600f387deb98efb5f9c85fec39e82aa9052", + "log": "52638600f387deb98efb5f9c85fec39e82aa9052\nbuild(java): set GOOGLE_CLOUD_PROJECT env for samples/integration tests (#484)\n\n* build(java): set GOOGLE_CLOUD_PROJECT env variable for samples/integration tests\n\n* ci: use java-docs-samples-testing for sample tests\n3df869dd6eb546ef13beeb7a9efa6ee0226afafd\nci: add dependency list completeness check (#490)\n\n\n682c0c37d1054966ca662a44259e96cc7aea4413\nbuild(nodejs): update lint ignore rules (#488)\n\n\n97c7ccfdceb927db1cbe6f3bb09616aa02bafd89\ndoc: document context-aware commit flags (#481)\n\nAlso, delete obsolete blurb about cleaning up old, dead files.\n\nCo-authored-by: Jeff Ching \n8eff3790f88b50706a0c4b6a20b385f24e9ac4e7\nfeat: common postprocessing for node libraries (#485)\n\nCo-authored-by: Justin Beckwith \n21c3b57ae54ae9db6a3a6b48b31c970c6ab56f19\nbuild(nodejs): remove unused codecov config (#486)\n\n\n" } } ], diff --git a/packages/google-cloud-dataproc/test/gapic_job_controller_v1.ts b/packages/google-cloud-dataproc/test/gapic_job_controller_v1.ts index d40835f1638..28a7a6ec383 100644 --- a/packages/google-cloud-dataproc/test/gapic_job_controller_v1.ts +++ b/packages/google-cloud-dataproc/test/gapic_job_controller_v1.ts @@ -25,7 +25,7 @@ import * as jobcontrollerModule from '../src'; import {PassThrough} from 'stream'; -import {protobuf} from 'google-gax'; +import {protobuf, LROperation} from 'google-gax'; function generateSampleMessage(instance: T) { const filledObject = (instance.constructor as typeof protobuf.Message).toObject( @@ -52,6 +52,38 @@ function stubSimpleCallWithCallback( : sinon.stub().callsArgWith(2, null, response); } +function stubLongRunningCall( + response?: ResponseType, + callError?: Error, + lroError?: Error +) { + const innerStub = lroError + ? sinon.stub().rejects(lroError) + : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError + ? sinon.stub().rejects(callError) + : sinon.stub().resolves([mockOperation]); +} + +function stubLongRunningCallWithCallback( + response?: ResponseType, + callError?: Error, + lroError?: Error +) { + const innerStub = lroError + ? sinon.stub().rejects(lroError) + : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError + ? 
sinon.stub().callsArgWith(2, callError) + : sinon.stub().callsArgWith(2, null, mockOperation); +} + function stubPageStreamingCall( responses?: ResponseType[], error?: Error @@ -629,6 +661,133 @@ describe('v1.JobControllerClient', () => { }); }); + describe('submitJobAsOperation', () => { + it('invokes submitJobAsOperation without error', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.SubmitJobRequest() + ); + const expectedOptions = {}; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.submitJobAsOperation = stubLongRunningCall( + expectedResponse + ); + const [operation] = await client.submitJobAsOperation(request); + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + assert( + (client.innerApiCalls.submitJobAsOperation as SinonStub) + .getCall(0) + .calledWith(request, expectedOptions, undefined) + ); + }); + + it('invokes submitJobAsOperation without error using callback', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.SubmitJobRequest() + ); + const expectedOptions = {}; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.submitJobAsOperation = stubLongRunningCallWithCallback( + expectedResponse + ); + const promise = new Promise((resolve, reject) => { + client.submitJobAsOperation( + request, + ( + err?: Error | null, + result?: LROperation< + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.IJobMetadata + > | null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + } + ); + }); + const operation = (await promise) as LROperation< + protos.google.cloud.dataproc.v1.IJob, + protos.google.cloud.dataproc.v1.IJobMetadata + >; + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + assert( + (client.innerApiCalls.submitJobAsOperation as SinonStub) + .getCall(0) + .calledWith(request, expectedOptions /*, callback defined above */) + ); + }); + + it('invokes submitJobAsOperation with call error', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1.SubmitJobRequest() + ); + const expectedOptions = {}; + const expectedError = new Error('expected'); + client.innerApiCalls.submitJobAsOperation = stubLongRunningCall( + undefined, + expectedError + ); + await assert.rejects(async () => { + await client.submitJobAsOperation(request); + }, expectedError); + assert( + (client.innerApiCalls.submitJobAsOperation as SinonStub) + .getCall(0) + .calledWith(request, expectedOptions, undefined) + ); + }); + + it('invokes submitJobAsOperation with LRO error', async () => { + const client = new jobcontrollerModule.v1.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = 
generateSampleMessage( + new protos.google.cloud.dataproc.v1.SubmitJobRequest() + ); + const expectedOptions = {}; + const expectedError = new Error('expected'); + client.innerApiCalls.submitJobAsOperation = stubLongRunningCall( + undefined, + undefined, + expectedError + ); + const [operation] = await client.submitJobAsOperation(request); + await assert.rejects(async () => { + await operation.promise(); + }, expectedError); + assert( + (client.innerApiCalls.submitJobAsOperation as SinonStub) + .getCall(0) + .calledWith(request, expectedOptions, undefined) + ); + }); + }); + describe('listJobs', () => { it('invokes listJobs without error', async () => { const client = new jobcontrollerModule.v1.JobControllerClient({ diff --git a/packages/google-cloud-dataproc/test/gapic_job_controller_v1beta2.ts b/packages/google-cloud-dataproc/test/gapic_job_controller_v1beta2.ts index 468f547c185..ee4c5c07b4c 100644 --- a/packages/google-cloud-dataproc/test/gapic_job_controller_v1beta2.ts +++ b/packages/google-cloud-dataproc/test/gapic_job_controller_v1beta2.ts @@ -25,7 +25,7 @@ import * as jobcontrollerModule from '../src'; import {PassThrough} from 'stream'; -import {protobuf} from 'google-gax'; +import {protobuf, LROperation} from 'google-gax'; function generateSampleMessage(instance: T) { const filledObject = (instance.constructor as typeof protobuf.Message).toObject( @@ -52,6 +52,38 @@ function stubSimpleCallWithCallback( : sinon.stub().callsArgWith(2, null, response); } +function stubLongRunningCall( + response?: ResponseType, + callError?: Error, + lroError?: Error +) { + const innerStub = lroError + ? sinon.stub().rejects(lroError) + : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError + ? sinon.stub().rejects(callError) + : sinon.stub().resolves([mockOperation]); +} + +function stubLongRunningCallWithCallback( + response?: ResponseType, + callError?: Error, + lroError?: Error +) { + const innerStub = lroError + ? sinon.stub().rejects(lroError) + : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError + ? 
sinon.stub().callsArgWith(2, callError) + : sinon.stub().callsArgWith(2, null, mockOperation); +} + function stubPageStreamingCall( responses?: ResponseType[], error?: Error @@ -631,6 +663,133 @@ describe('v1beta2.JobControllerClient', () => { }); }); + describe('submitJobAsOperation', () => { + it('invokes submitJobAsOperation without error', async () => { + const client = new jobcontrollerModule.v1beta2.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1beta2.SubmitJobRequest() + ); + const expectedOptions = {}; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.submitJobAsOperation = stubLongRunningCall( + expectedResponse + ); + const [operation] = await client.submitJobAsOperation(request); + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + assert( + (client.innerApiCalls.submitJobAsOperation as SinonStub) + .getCall(0) + .calledWith(request, expectedOptions, undefined) + ); + }); + + it('invokes submitJobAsOperation without error using callback', async () => { + const client = new jobcontrollerModule.v1beta2.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1beta2.SubmitJobRequest() + ); + const expectedOptions = {}; + const expectedResponse = generateSampleMessage( + new protos.google.longrunning.Operation() + ); + client.innerApiCalls.submitJobAsOperation = stubLongRunningCallWithCallback( + expectedResponse + ); + const promise = new Promise((resolve, reject) => { + client.submitJobAsOperation( + request, + ( + err?: Error | null, + result?: LROperation< + protos.google.cloud.dataproc.v1beta2.IJob, + protos.google.cloud.dataproc.v1beta2.IJobMetadata + > | null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + } + ); + }); + const operation = (await promise) as LROperation< + protos.google.cloud.dataproc.v1beta2.IJob, + protos.google.cloud.dataproc.v1beta2.IJobMetadata + >; + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + assert( + (client.innerApiCalls.submitJobAsOperation as SinonStub) + .getCall(0) + .calledWith(request, expectedOptions /*, callback defined above */) + ); + }); + + it('invokes submitJobAsOperation with call error', async () => { + const client = new jobcontrollerModule.v1beta2.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1beta2.SubmitJobRequest() + ); + const expectedOptions = {}; + const expectedError = new Error('expected'); + client.innerApiCalls.submitJobAsOperation = stubLongRunningCall( + undefined, + expectedError + ); + await assert.rejects(async () => { + await client.submitJobAsOperation(request); + }, expectedError); + assert( + (client.innerApiCalls.submitJobAsOperation as SinonStub) + .getCall(0) + .calledWith(request, expectedOptions, undefined) + ); + }); + + it('invokes submitJobAsOperation with LRO error', async () => { + const client = new jobcontrollerModule.v1beta2.JobControllerClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 
'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.dataproc.v1beta2.SubmitJobRequest() + ); + const expectedOptions = {}; + const expectedError = new Error('expected'); + client.innerApiCalls.submitJobAsOperation = stubLongRunningCall( + undefined, + undefined, + expectedError + ); + const [operation] = await client.submitJobAsOperation(request); + await assert.rejects(async () => { + await operation.promise(); + }, expectedError); + assert( + (client.innerApiCalls.submitJobAsOperation as SinonStub) + .getCall(0) + .calledWith(request, expectedOptions, undefined) + ); + }); + }); + describe('listJobs', () => { it('invokes listJobs without error', async () => { const client = new jobcontrollerModule.v1beta2.JobControllerClient({