diff --git a/packages/google-cloud-compute/google/cloud/compute/__init__.py b/packages/google-cloud-compute/google/cloud/compute/__init__.py index 646f9e1fc0b7..86c395be0e51 100644 --- a/packages/google-cloud-compute/google/cloud/compute/__init__.py +++ b/packages/google-cloud-compute/google/cloud/compute/__init__.py @@ -1115,6 +1115,7 @@ PatchRegionSslPolicyRequest, PatchRegionTargetHttpsProxyRequest, PatchRegionUrlMapRequest, + PatchResourcePolicyRequest, PatchRouterRequest, PatchRuleFirewallPolicyRequest, PatchRuleNetworkFirewallPolicyRequest, @@ -2571,6 +2572,7 @@ "PatchRegionSslPolicyRequest", "PatchRegionTargetHttpsProxyRequest", "PatchRegionUrlMapRequest", + "PatchResourcePolicyRequest", "PatchRouterRequest", "PatchRuleFirewallPolicyRequest", "PatchRuleNetworkFirewallPolicyRequest", diff --git a/packages/google-cloud-compute/google/cloud/compute_v1/__init__.py b/packages/google-cloud-compute/google/cloud/compute_v1/__init__.py index b3b495610120..55da57749aae 100644 --- a/packages/google-cloud-compute/google/cloud/compute_v1/__init__.py +++ b/packages/google-cloud-compute/google/cloud/compute_v1/__init__.py @@ -1009,6 +1009,7 @@ PatchRegionSslPolicyRequest, PatchRegionTargetHttpsProxyRequest, PatchRegionUrlMapRequest, + PatchResourcePolicyRequest, PatchRouterRequest, PatchRuleFirewallPolicyRequest, PatchRuleNetworkFirewallPolicyRequest, @@ -2416,6 +2417,7 @@ "PatchRegionSslPolicyRequest", "PatchRegionTargetHttpsProxyRequest", "PatchRegionUrlMapRequest", + "PatchResourcePolicyRequest", "PatchRouterRequest", "PatchRuleFirewallPolicyRequest", "PatchRuleNetworkFirewallPolicyRequest", diff --git a/packages/google-cloud-compute/google/cloud/compute_v1/gapic_metadata.json b/packages/google-cloud-compute/google/cloud/compute_v1/gapic_metadata.json index d70dcf72e83f..0f722bc68444 100644 --- a/packages/google-cloud-compute/google/cloud/compute_v1/gapic_metadata.json +++ b/packages/google-cloud-compute/google/cloud/compute_v1/gapic_metadata.json @@ -3317,6 +3317,11 @@ "list" ] }, + "Patch": { + "methods": [ + "patch" + ] + }, "SetIamPolicy": { "methods": [ "set_iam_policy" diff --git a/packages/google-cloud-compute/google/cloud/compute_v1/services/region_security_policies/client.py b/packages/google-cloud-compute/google/cloud/compute_v1/services/region_security_policies/client.py index d269c4dfc664..0c306bab2eb0 100644 --- a/packages/google-cloud-compute/google/cloud/compute_v1/services/region_security_policies/client.py +++ b/packages/google-cloud-compute/google/cloud/compute_v1/services/region_security_policies/client.py @@ -1267,7 +1267,7 @@ def patch_unary( metadata: Sequence[Tuple[str, str]] = (), ) -> compute.Operation: r"""Patches the specified policy with the data included - in the request. To clear fields in the rule, leave the + in the request. To clear fields in the policy, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, @@ -1411,7 +1411,7 @@ def patch( metadata: Sequence[Tuple[str, str]] = (), ) -> extended_operation.ExtendedOperation: r"""Patches the specified policy with the data included - in the request. To clear fields in the rule, leave the + in the request. To clear fields in the policy, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. 
Please use the per rule methods like addRule, patchRule, diff --git a/packages/google-cloud-compute/google/cloud/compute_v1/services/resource_policies/client.py b/packages/google-cloud-compute/google/cloud/compute_v1/services/resource_policies/client.py index 65bfec0a3b35..22dc62206e6e 100644 --- a/packages/google-cloud-compute/google/cloud/compute_v1/services/resource_policies/client.py +++ b/packages/google-cloud-compute/google/cloud/compute_v1/services/resource_policies/client.py @@ -1518,6 +1518,301 @@ def sample_list(): # Done; return the response. return response + def patch_unary( + self, + request: Optional[Union[compute.PatchResourcePolicyRequest, dict]] = None, + *, + project: Optional[str] = None, + region: Optional[str] = None, + resource_policy: Optional[str] = None, + resource_policy_resource: Optional[compute.ResourcePolicy] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Modify the specified resource policy. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import compute_v1 + + def sample_patch(): + # Create a client + client = compute_v1.ResourcePoliciesClient() + + # Initialize request argument(s) + request = compute_v1.PatchResourcePolicyRequest( + project="project_value", + region="region_value", + resource_policy="resource_policy_value", + ) + + # Make the request + response = client.patch(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.compute_v1.types.PatchResourcePolicyRequest, dict]): + The request object. A request message for + ResourcePolicies.Patch. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource_policy (str): + Id of the resource policy to patch. + This corresponds to the ``resource_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource_policy_resource (google.cloud.compute_v1.types.ResourcePolicy): + The body resource for this request + This corresponds to the ``resource_policy_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.extended_operation.ExtendedOperation: + An object representing a extended + long-running operation. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any( + [project, region, resource_policy, resource_policy_resource] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchResourcePolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchResourcePolicyRequest): + request = compute.PatchResourcePolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if resource_policy is not None: + request.resource_policy = resource_policy + if resource_policy_resource is not None: + request.resource_policy_resource = resource_policy_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + ( + ("project", request.project), + ("region", request.region), + ("resource_policy", request.resource_policy), + ) + ), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def patch( + self, + request: Optional[Union[compute.PatchResourcePolicyRequest, dict]] = None, + *, + project: Optional[str] = None, + region: Optional[str] = None, + resource_policy: Optional[str] = None, + resource_policy_resource: Optional[compute.ResourcePolicy] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> extended_operation.ExtendedOperation: + r"""Modify the specified resource policy. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import compute_v1 + + def sample_patch(): + # Create a client + client = compute_v1.ResourcePoliciesClient() + + # Initialize request argument(s) + request = compute_v1.PatchResourcePolicyRequest( + project="project_value", + region="region_value", + resource_policy="resource_policy_value", + ) + + # Make the request + response = client.patch(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.compute_v1.types.PatchResourcePolicyRequest, dict]): + The request object. A request message for + ResourcePolicies.Patch. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ resource_policy (str): + Id of the resource policy to patch. + This corresponds to the ``resource_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource_policy_resource (google.cloud.compute_v1.types.ResourcePolicy): + The body resource for this request + This corresponds to the ``resource_policy_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.extended_operation.ExtendedOperation: + An object representing a extended + long-running operation. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [project, region, resource_policy, resource_policy_resource] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchResourcePolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchResourcePolicyRequest): + request = compute.PatchResourcePolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if resource_policy is not None: + request.resource_policy = resource_policy + if resource_policy_resource is not None: + request.resource_policy_resource = resource_policy_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + ( + ("project", request.project), + ("region", request.region), + ("resource_policy", request.resource_policy), + ) + ), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + operation_service = self._transport._region_operations_client + operation_request = compute.GetRegionOperationRequest() + operation_request.project = request.project + operation_request.region = request.region + operation_request.operation = response.name + + get_operation = functools.partial(operation_service.get, operation_request) + # Cancel is not part of extended operations yet. + cancel_operation = lambda: None + + # Note: this class is an implementation detail to provide a uniform + # set of names for certain fields in the extended operation proto message. + # See google.api_core.extended_operation.ExtendedOperation for details + # on these properties and the expected interface. 
+ class _CustomOperation(extended_operation.ExtendedOperation): + @property + def error_message(self): + return self._extended_operation.http_error_message + + @property + def error_code(self): + return self._extended_operation.http_error_status_code + + response = _CustomOperation.make(get_operation, cancel_operation, response) + + # Done; return the response. + return response + def set_iam_policy( self, request: Optional[ diff --git a/packages/google-cloud-compute/google/cloud/compute_v1/services/resource_policies/transports/base.py b/packages/google-cloud-compute/google/cloud/compute_v1/services/resource_policies/transports/base.py index 984d169dd0ab..5f1f23fabb66 100644 --- a/packages/google-cloud-compute/google/cloud/compute_v1/services/resource_policies/transports/base.py +++ b/packages/google-cloud-compute/google/cloud/compute_v1/services/resource_policies/transports/base.py @@ -157,6 +157,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), self.set_iam_policy: gapic_v1.method.wrap_method( self.set_iam_policy, default_timeout=None, @@ -235,6 +240,15 @@ def list( ]: raise NotImplementedError() + @property + def patch( + self, + ) -> Callable[ + [compute.PatchResourcePolicyRequest], + Union[compute.Operation, Awaitable[compute.Operation]], + ]: + raise NotImplementedError() + @property def set_iam_policy( self, diff --git a/packages/google-cloud-compute/google/cloud/compute_v1/services/resource_policies/transports/rest.py b/packages/google-cloud-compute/google/cloud/compute_v1/services/resource_policies/transports/rest.py index 8293719cee8d..90b2870fc0bc 100644 --- a/packages/google-cloud-compute/google/cloud/compute_v1/services/resource_policies/transports/rest.py +++ b/packages/google-cloud-compute/google/cloud/compute_v1/services/resource_policies/transports/rest.py @@ -111,6 +111,14 @@ def post_list(self, response): logging.log(f"Received response: {response}") return response + def pre_patch(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_patch(self, response): + logging.log(f"Received response: {response}") + return response + def pre_set_iam_policy(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -265,6 +273,27 @@ def post_list( """ return response + def pre_patch( + self, + request: compute.PatchResourcePolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[compute.PatchResourcePolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for patch + + Override in a subclass to manipulate the request or metadata + before they are sent to the ResourcePolicies server. + """ + return request, metadata + + def post_patch(self, response: compute.Operation) -> compute.Operation: + """Post-rpc interceptor for patch + + Override in a subclass to manipulate the response + after it is returned by the ResourcePolicies server but before + it is returned to user code. 
+ """ + return response + def pre_set_iam_policy( self, request: compute.SetIamPolicyResourcePolicyRequest, @@ -1017,6 +1046,116 @@ def __call__( resp = self._interceptor.post_list(resp) return resp + class _Patch(ResourcePoliciesRestStub): + def __hash__(self): + return hash("Patch") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: compute.PatchResourcePolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchResourcePolicyRequest): + The request object. A request message for + ResourcePolicies.Patch. See the method + description for details. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/compute/v1/projects/{project}/regions/{region}/resourcePolicies/{resource_policy}", + "body": "resource_policy_resource", + }, + ] + request, metadata = self._interceptor.pre_patch(request, metadata) + pb_request = compute.PatchResourcePolicyRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=False, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = compute.Operation() + pb_resp = compute.Operation.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_patch(resp) + return resp + class _SetIamPolicy(ResourcePoliciesRestStub): def __hash__(self): return hash("SetIamPolicy") @@ -1301,6 +1440,14 @@ def list( # In C++ this would require a dynamic_cast return self._List(self._session, self._host, self._interceptor) # type: ignore + @property + def patch( + self, + ) -> Callable[[compute.PatchResourcePolicyRequest], compute.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._Patch(self._session, self._host, self._interceptor) # type: ignore + @property def set_iam_policy( self, diff --git a/packages/google-cloud-compute/google/cloud/compute_v1/services/security_policies/client.py b/packages/google-cloud-compute/google/cloud/compute_v1/services/security_policies/client.py index 5acf15d6b845..7f047d8fff09 100644 --- a/packages/google-cloud-compute/google/cloud/compute_v1/services/security_policies/client.py +++ b/packages/google-cloud-compute/google/cloud/compute_v1/services/security_policies/client.py @@ -1792,7 +1792,7 @@ def patch_unary( metadata: Sequence[Tuple[str, str]] = (), ) -> compute.Operation: r"""Patches the specified policy with the data included - in the request. To clear fields in the rule, leave the + in the request. To clear fields in the policy, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, @@ -1922,7 +1922,7 @@ def patch( metadata: Sequence[Tuple[str, str]] = (), ) -> extended_operation.ExtendedOperation: r"""Patches the specified policy with the data included - in the request. To clear fields in the rule, leave the + in the request. To clear fields in the policy, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, @@ -2075,7 +2075,9 @@ def patch_rule_unary( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.Operation: - r"""Patches a rule at the specified priority. + r"""Patches a rule at the specified priority. To clear + fields in the rule, leave the fields empty and specify + them in the updateMask. .. code-block:: python @@ -2202,7 +2204,9 @@ def patch_rule( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> extended_operation.ExtendedOperation: - r"""Patches a rule at the specified priority. + r"""Patches a rule at the specified priority. To clear + fields in the rule, leave the fields empty and specify + them in the updateMask. .. 
code-block:: python diff --git a/packages/google-cloud-compute/google/cloud/compute_v1/types/__init__.py b/packages/google-cloud-compute/google/cloud/compute_v1/types/__init__.py index 244dd63a7bc2..e310d187d528 100644 --- a/packages/google-cloud-compute/google/cloud/compute_v1/types/__init__.py +++ b/packages/google-cloud-compute/google/cloud/compute_v1/types/__init__.py @@ -910,6 +910,7 @@ PatchRegionSslPolicyRequest, PatchRegionTargetHttpsProxyRequest, PatchRegionUrlMapRequest, + PatchResourcePolicyRequest, PatchRouterRequest, PatchRuleFirewallPolicyRequest, PatchRuleNetworkFirewallPolicyRequest, @@ -2276,6 +2277,7 @@ "PatchRegionSslPolicyRequest", "PatchRegionTargetHttpsProxyRequest", "PatchRegionUrlMapRequest", + "PatchResourcePolicyRequest", "PatchRouterRequest", "PatchRuleFirewallPolicyRequest", "PatchRuleNetworkFirewallPolicyRequest", diff --git a/packages/google-cloud-compute/google/cloud/compute_v1/types/compute.py b/packages/google-cloud-compute/google/cloud/compute_v1/types/compute.py index 25d387d13bf4..43d989f2050f 100644 --- a/packages/google-cloud-compute/google/cloud/compute_v1/types/compute.py +++ b/packages/google-cloud-compute/google/cloud/compute_v1/types/compute.py @@ -918,6 +918,7 @@ "PatchRegionSslPolicyRequest", "PatchRegionTargetHttpsProxyRequest", "PatchRegionUrlMapRequest", + "PatchResourcePolicyRequest", "PatchRouterRequest", "PatchRuleFirewallPolicyRequest", "PatchRuleNetworkFirewallPolicyRequest", @@ -12334,8 +12335,8 @@ class BackendBucket(proto.Message): This field is a member of `oneof`_ ``_creation_timestamp``. custom_response_headers (MutableSequence[str]): - Headers that the HTTP/S load balancer should - add to proxied responses. + Headers that the Application Load Balancer + should add to proxied responses. description (str): An optional textual description of the resource; provided by the client when the @@ -23807,6 +23808,13 @@ class TargetShape(proto.Enum): maximize utilization of unused zonal reservations. Recommended for batch workloads that do not require high availability. + ANY_SINGLE_ZONE (61100880): + The group creates all VM instances within a + single zone. The zone is selected based on the + present resource constraints and to maximize + utilization of unused zonal reservations. + Recommended for batch workloads with heavy + interprocess communication. BALANCED (468409608): The group prioritizes acquisition of resources, scheduling VMs in zones where @@ -23826,6 +23834,7 @@ class TargetShape(proto.Enum): """ UNDEFINED_TARGET_SHAPE = 0 ANY = 64972 + ANY_SINGLE_ZONE = 61100880 BALANCED = 468409608 EVEN = 2140442 @@ -40553,14 +40562,11 @@ class InstanceGroupManagerUpdatePolicy(proto.Message): This field is a member of `oneof`_ ``_replacement_method``. type_ (str): The type of update process. You can specify - either PROACTIVE so that the instance group - manager proactively executes actions in order to - bring instances to their target versions or - OPPORTUNISTIC so that no action is proactively - executed but the update will be performed as - part of other actions (for example, resizes or - recreateInstances calls). Check the Type enum - for the list of possible values. + either PROACTIVE so that the MIG automatically + updates VMs to the latest configurations or + OPPORTUNISTIC so that you can select the VMs + that you want to update. Check the Type enum for + the list of possible values. This field is a member of `oneof`_ ``_type``. 
""" @@ -40656,13 +40662,10 @@ class ReplacementMethod(proto.Enum): class Type(proto.Enum): r"""The type of update process. You can specify either PROACTIVE - so that the instance group manager proactively executes actions - in order to bring instances to their target versions or - OPPORTUNISTIC so that no action is proactively executed but the - update will be performed as part of other actions (for example, - resizes or recreateInstances calls). Additional supported values - which may be not listed in the enum directly due to technical - reasons: + so that the MIG automatically updates VMs to the latest + configurations or OPPORTUNISTIC so that you can select the VMs + that you want to update. Additional supported values which may + be not listed in the enum directly due to technical reasons: PROACTIVE @@ -40671,11 +40674,9 @@ class Type(proto.Enum): A value indicating that the enum field is not set. OPPORTUNISTIC (429530089): - No action is being proactively performed in - order to bring this IGM to its target version - distribution (regardless of whether this - distribution is expressed using instanceTemplate - or versions field). + MIG will apply new configurations to existing + VMs only when you selectively target specific or + all VMs to be updated. """ UNDEFINED_TYPE = 0 OPPORTUNISTIC = 429530089 @@ -69508,6 +69509,76 @@ class PatchRegionUrlMapRequest(proto.Message): ) +class PatchResourcePolicyRequest(proto.Message): + r"""A request message for ResourcePolicies.Patch. See the method + description for details. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + resource_policy (str): + Id of the resource policy to patch. + resource_policy_resource (google.cloud.compute_v1.types.ResourcePolicy): + The body resource for this request + update_mask (str): + update_mask indicates fields to be updated as part of this + request. + + This field is a member of `oneof`_ ``_update_mask``. + """ + + project: str = proto.Field( + proto.STRING, + number=227560217, + ) + region: str = proto.Field( + proto.STRING, + number=138946292, + ) + request_id: str = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + resource_policy: str = proto.Field( + proto.STRING, + number=159240835, + ) + resource_policy_resource: "ResourcePolicy" = proto.Field( + proto.MESSAGE, + number=76826186, + message="ResourcePolicy", + ) + update_mask: str = proto.Field( + proto.STRING, + number=500079778, + optional=True, + ) + + class PatchRouterRequest(proto.Message): r"""A request message for Routers.Patch. See the method description for details. 
@@ -72629,6 +72700,12 @@ class QuotaExceededInfo(proto.Message): Attributes: dimensions (MutableMapping[str, str]): The map holding related quota dimensions. + future_limit (float): + Future quota limit being rolled out. The + limit's unit depends on the quota type or + metric. + + This field is a member of `oneof`_ ``_future_limit``. limit (float): Current effective quota limit. The limit's unit depends on the quota type or metric. @@ -72642,13 +72719,42 @@ class QuotaExceededInfo(proto.Message): The Compute Engine quota metric name. This field is a member of `oneof`_ ``_metric_name``. + rollout_status (str): + Rollout status of the future quota limit. + Check the RolloutStatus enum for the list of + possible values. + + This field is a member of `oneof`_ ``_rollout_status``. """ + class RolloutStatus(proto.Enum): + r"""Rollout status of the future quota limit. + + Values: + UNDEFINED_ROLLOUT_STATUS (0): + A value indicating that the enum field is not + set. + IN_PROGRESS (469193735): + IN_PROGRESS - A rollout is in process which will change the + limit value to future limit. + ROLLOUT_STATUS_UNSPECIFIED (26864568): + ROLLOUT_STATUS_UNSPECIFIED - Rollout status is not + specified. The default value. + """ + UNDEFINED_ROLLOUT_STATUS = 0 + IN_PROGRESS = 469193735 + ROLLOUT_STATUS_UNSPECIFIED = 26864568 + dimensions: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=414334925, ) + future_limit: float = proto.Field( + proto.DOUBLE, + number=456564287, + optional=True, + ) limit: float = proto.Field( proto.DOUBLE, number=102976443, @@ -72664,6 +72770,11 @@ class QuotaExceededInfo(proto.Message): number=409881530, optional=True, ) + rollout_status: str = proto.Field( + proto.STRING, + number=476426816, + optional=True, + ) class RawDisk(proto.Message): @@ -80388,6 +80499,14 @@ class Scheduling(proto.Message): enum for the list of possible values. This field is a member of `oneof`_ ``_instance_termination_action``. + local_ssd_recovery_timeout (google.cloud.compute_v1.types.Duration): + Specifies the maximum amount of time a Local + Ssd Vm should wait while recovery of the Local + Ssd state is attempted. Its value should be in + between 0 and 168 hours with hour granularity + and the default value being 1 hour. + + This field is a member of `oneof`_ ``_local_ssd_recovery_timeout``. location_hint (str): An opaque location hint used to place the instance close to other resources. This field is @@ -80507,6 +80626,12 @@ class ProvisioningModel(proto.Enum): number=107380667, optional=True, ) + local_ssd_recovery_timeout: "Duration" = proto.Field( + proto.MESSAGE, + number=268015590, + optional=True, + message="Duration", + ) location_hint: str = proto.Field( proto.STRING, number=350519505, @@ -82757,6 +82882,10 @@ class ServiceAttachmentConnectedEndpoint(proto.Message): .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: + consumer_network (str): + The url of the consumer network. + + This field is a member of `oneof`_ ``_consumer_network``. endpoint (str): The url of a connected endpoint. 
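The new Scheduling.local_ssd_recovery_timeout field introduced above is a compute_v1.Duration message. A hedged sketch (values are illustrative; per the docstring, the timeout must fall between 0 and 168 hours with hour granularity) of setting it when building scheduling options:

from google.cloud import compute_v1

# Illustrative only: wait up to two hours for Local SSD recovery.
scheduling = compute_v1.Scheduling(
    automatic_restart=True,
    local_ssd_recovery_timeout=compute_v1.Duration(seconds=2 * 60 * 60),
)

Such a Scheduling message could then be supplied as the scheduling_resource of an Instances.SetScheduling request, as exercised by the updated set_scheduling tests later in this diff.
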
@@ -82810,6 +82939,11 @@ class Status(proto.Enum): REJECTED = 174130302 STATUS_UNSPECIFIED = 42133066 + consumer_network: str = proto.Field( + proto.STRING, + number=254357221, + optional=True, + ) endpoint: str = proto.Field( proto.STRING, number=130489749, diff --git a/packages/google-cloud-compute/samples/generated_samples/compute_v1_generated_resource_policies_patch_sync.py b/packages/google-cloud-compute/samples/generated_samples/compute_v1_generated_resource_policies_patch_sync.py new file mode 100644 index 000000000000..c889d29892b9 --- /dev/null +++ b/packages/google-cloud-compute/samples/generated_samples/compute_v1_generated_resource_policies_patch_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for Patch +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-compute + + +# [START compute_v1_generated_ResourcePolicies_Patch_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import compute_v1 + + +def sample_patch(): + # Create a client + client = compute_v1.ResourcePoliciesClient() + + # Initialize request argument(s) + request = compute_v1.PatchResourcePolicyRequest( + project="project_value", + region="region_value", + resource_policy="resource_policy_value", + ) + + # Make the request + response = client.patch(request=request) + + # Handle the response + print(response) + +# [END compute_v1_generated_ResourcePolicies_Patch_sync] diff --git a/packages/google-cloud-compute/samples/generated_samples/snippet_metadata_google.cloud.compute.v1.json b/packages/google-cloud-compute/samples/generated_samples/snippet_metadata_google.cloud.compute.v1.json index e75a0d5a40b9..ebc934990a49 100644 --- a/packages/google-cloud-compute/samples/generated_samples/snippet_metadata_google.cloud.compute.v1.json +++ b/packages/google-cloud-compute/samples/generated_samples/snippet_metadata_google.cloud.compute.v1.json @@ -46911,6 +46911,98 @@ ], "title": "compute_v1_generated_resource_policies_list_sync.py" }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.compute_v1.ResourcePoliciesClient", + "shortName": "ResourcePoliciesClient" + }, + "fullName": "google.cloud.compute_v1.ResourcePoliciesClient.patch", + "method": { + "fullName": "google.cloud.compute.v1.ResourcePolicies.Patch", + "service": { + "fullName": "google.cloud.compute.v1.ResourcePolicies", + "shortName": "ResourcePolicies" + }, + "shortName": "Patch" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.compute_v1.types.PatchResourcePolicyRequest" + }, + { + "name": "project", + "type": "str" + }, + { + "name": "region", + "type": "str" + }, + { + "name": "resource_policy", + "type": "str" + }, + { + "name": "resource_policy_resource", + "type": "google.cloud.compute_v1.types.ResourcePolicy" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.extended_operation.ExtendedOperation", + "shortName": "patch" + }, + "description": "Sample for Patch", + "file": "compute_v1_generated_resource_policies_patch_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "compute_v1_generated_ResourcePolicies_Patch_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "compute_v1_generated_resource_policies_patch_sync.py" + }, { "canonical": true, "clientMethod": { diff --git a/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_instance_templates.py b/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_instance_templates.py index 9f8ca9503a74..e36fcb89487a 100644 --- a/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_instance_templates.py +++ b/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_instance_templates.py @@ -2309,6 +2309,7 @@ def test_insert_rest(request_type): 
"scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -2721,6 +2722,7 @@ def test_insert_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -3002,6 +3004,7 @@ def test_insert_unary_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -3392,6 +3395,7 @@ def test_insert_unary_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ diff --git a/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_instances.py b/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_instances.py index c15735cf3d55..267b76c39d6a 100644 --- a/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_instances.py +++ b/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_instances.py @@ -3375,6 +3375,7 @@ def test_bulk_insert_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -3778,6 +3779,7 @@ def test_bulk_insert_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -4049,6 +4051,7 @@ def test_bulk_insert_unary_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -4430,6 +4433,7 @@ def test_bulk_insert_unary_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -8912,6 +8916,7 @@ def test_insert_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -9330,6 +9335,7 @@ def test_insert_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -9612,6 +9618,7 @@ def 
test_insert_unary_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -10008,6 +10015,7 @@ def test_insert_unary_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -19000,6 +19008,7 @@ def test_set_scheduling_rest(request_type): request_init["scheduling_resource"] = { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -19252,6 +19261,7 @@ def test_set_scheduling_rest_bad_request( request_init["scheduling_resource"] = { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -19369,6 +19379,7 @@ def test_set_scheduling_unary_rest(request_type): request_init["scheduling_resource"] = { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -19599,6 +19610,7 @@ def test_set_scheduling_unary_rest_bad_request( request_init["scheduling_resource"] = { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -25533,6 +25545,7 @@ def test_update_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -25956,6 +25969,7 @@ def test_update_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -26244,6 +26258,7 @@ def test_update_unary_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -26645,6 +26660,7 @@ def test_update_unary_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ diff --git a/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_machine_images.py b/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_machine_images.py index 44ed4b485436..b0e034587be1 100644 --- 
a/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_machine_images.py +++ b/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_machine_images.py @@ -1924,6 +1924,7 @@ def test_insert_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -2386,6 +2387,7 @@ def test_insert_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -2709,6 +2711,7 @@ def test_insert_unary_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -3151,6 +3154,7 @@ def test_insert_unary_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ diff --git a/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_region_instance_templates.py b/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_region_instance_templates.py index 49f1fd7dc7f9..40f9ddd4684d 100644 --- a/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_region_instance_templates.py +++ b/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_region_instance_templates.py @@ -1726,6 +1726,7 @@ def test_insert_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -2143,6 +2144,7 @@ def test_insert_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -2426,6 +2428,7 @@ def test_insert_unary_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -2821,6 +2824,7 @@ def test_insert_unary_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ diff --git a/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_region_instances.py b/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_region_instances.py index 88c18a46654e..ccec113ad1c2 100644 --- a/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_region_instances.py +++ 
b/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_region_instances.py @@ -726,6 +726,7 @@ def test_bulk_insert_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -1131,6 +1132,7 @@ def test_bulk_insert_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -1402,6 +1404,7 @@ def test_bulk_insert_unary_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -1785,6 +1788,7 @@ def test_bulk_insert_unary_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ diff --git a/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_resource_policies.py b/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_resource_policies.py index ca97652f3601..d6d7e3759826 100644 --- a/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_resource_policies.py +++ b/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_resource_policies.py @@ -3481,6 +3481,964 @@ def test_list_rest_pager(transport: str = "rest"): assert page_.raw_page.next_page_token == token +@pytest.mark.parametrize( + "request_type", + [ + compute.PatchResourcePolicyRequest, + dict, + ], +) +def test_patch_rest(request_type): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "project": "sample1", + "region": "sample2", + "resource_policy": "sample3", + } + request_init["resource_policy_resource"] = { + "creation_timestamp": "creation_timestamp_value", + "description": "description_value", + "disk_consistency_group_policy": {}, + "group_placement_policy": { + "availability_domain_count": 2650, + "collocation": "collocation_value", + "vm_count": 875, + }, + "id": 205, + "instance_schedule_policy": { + "expiration_time": "expiration_time_value", + "start_time": "start_time_value", + "time_zone": "time_zone_value", + "vm_start_schedule": {"schedule": "schedule_value"}, + "vm_stop_schedule": {}, + }, + "kind": "kind_value", + "name": "name_value", + "region": "region_value", + "resource_status": { + "instance_schedule_policy": { + "last_run_start_time": "last_run_start_time_value", + "next_run_start_time": "next_run_start_time_value", + } + }, + "self_link": "self_link_value", + "snapshot_schedule_policy": { + "retention_policy": { + "max_retention_days": 1933, + "on_source_disk_delete": "on_source_disk_delete_value", + }, + "schedule": { + "daily_schedule": { + "days_in_cycle": 1366, + "duration": "duration_value", + "start_time": "start_time_value", + }, + "hourly_schedule": { + "duration": "duration_value", + "hours_in_cycle": 1494, + "start_time": "start_time_value", + }, + 
"weekly_schedule": { + "day_of_weeks": [ + { + "day": "day_value", + "duration": "duration_value", + "start_time": "start_time_value", + } + ] + }, + }, + "snapshot_properties": { + "chain_name": "chain_name_value", + "guest_flush": True, + "labels": {}, + "storage_locations": [ + "storage_locations_value1", + "storage_locations_value2", + ], + }, + }, + "status": "status_value", + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id="client_operation_id_value", + creation_timestamp="creation_timestamp_value", + description="description_value", + end_time="end_time_value", + http_error_message="http_error_message_value", + http_error_status_code=2374, + id=205, + insert_time="insert_time_value", + kind="kind_value", + name="name_value", + operation_group_id="operation_group_id_value", + operation_type="operation_type_value", + progress=885, + region="region_value", + self_link="self_link_value", + start_time="start_time_value", + status=compute.Operation.Status.DONE, + status_message="status_message_value", + target_id=947, + target_link="target_link_value", + user="user_value", + zone="zone_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = compute.Operation.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, extended_operation.ExtendedOperation) + assert response.client_operation_id == "client_operation_id_value" + assert response.creation_timestamp == "creation_timestamp_value" + assert response.description == "description_value" + assert response.end_time == "end_time_value" + assert response.http_error_message == "http_error_message_value" + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == "insert_time_value" + assert response.kind == "kind_value" + assert response.name == "name_value" + assert response.operation_group_id == "operation_group_id_value" + assert response.operation_type == "operation_type_value" + assert response.progress == 885 + assert response.region == "region_value" + assert response.self_link == "self_link_value" + assert response.start_time == "start_time_value" + assert response.status == compute.Operation.Status.DONE + assert response.status_message == "status_message_value" + assert response.target_id == 947 + assert response.target_link == "target_link_value" + assert response.user == "user_value" + assert response.zone == "zone_value" + + +def test_patch_rest_required_fields(request_type=compute.PatchResourcePolicyRequest): + transport_class = transports.ResourcePoliciesRestTransport + + request_init = {} + request_init["project"] = "" + request_init["region"] = "" + request_init["resource_policy"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).patch._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["project"] = "project_value" + jsonified_request["region"] = "region_value" + jsonified_request["resourcePolicy"] = "resource_policy_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).patch._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "request_id", + "update_mask", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "project" in jsonified_request + assert jsonified_request["project"] == "project_value" + assert "region" in jsonified_request + assert jsonified_request["region"] == "region_value" + assert "resourcePolicy" in jsonified_request + assert jsonified_request["resourcePolicy"] == "resource_policy_value" + + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. 
+ with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = compute.Operation.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.patch(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_patch_rest_unset_required_fields(): + transport = transports.ResourcePoliciesRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.patch._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "requestId", + "updateMask", + ) + ) + & set( + ( + "project", + "region", + "resourcePolicy", + "resourcePolicyResource", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_patch_rest_interceptors(null_interceptor): + transport = transports.ResourcePoliciesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ResourcePoliciesRestInterceptor(), + ) + client = ResourcePoliciesClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ResourcePoliciesRestInterceptor, "post_patch" + ) as post, mock.patch.object( + transports.ResourcePoliciesRestInterceptor, "pre_patch" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = compute.PatchResourcePolicyRequest.pb( + compute.PatchResourcePolicyRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = compute.Operation.to_json(compute.Operation()) + + request = compute.PatchResourcePolicyRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = compute.Operation() + + client.patch( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_patch_rest_bad_request( + transport: str = "rest", request_type=compute.PatchResourcePolicyRequest +): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "project": "sample1", + "region": "sample2", + "resource_policy": "sample3", + } + request_init["resource_policy_resource"] = { + "creation_timestamp": "creation_timestamp_value", + "description": "description_value", + "disk_consistency_group_policy": {}, + "group_placement_policy": { + "availability_domain_count": 2650, + "collocation": "collocation_value", + "vm_count": 875, + }, + "id": 205, + "instance_schedule_policy": { + "expiration_time": 
"expiration_time_value", + "start_time": "start_time_value", + "time_zone": "time_zone_value", + "vm_start_schedule": {"schedule": "schedule_value"}, + "vm_stop_schedule": {}, + }, + "kind": "kind_value", + "name": "name_value", + "region": "region_value", + "resource_status": { + "instance_schedule_policy": { + "last_run_start_time": "last_run_start_time_value", + "next_run_start_time": "next_run_start_time_value", + } + }, + "self_link": "self_link_value", + "snapshot_schedule_policy": { + "retention_policy": { + "max_retention_days": 1933, + "on_source_disk_delete": "on_source_disk_delete_value", + }, + "schedule": { + "daily_schedule": { + "days_in_cycle": 1366, + "duration": "duration_value", + "start_time": "start_time_value", + }, + "hourly_schedule": { + "duration": "duration_value", + "hours_in_cycle": 1494, + "start_time": "start_time_value", + }, + "weekly_schedule": { + "day_of_weeks": [ + { + "day": "day_value", + "duration": "duration_value", + "start_time": "start_time_value", + } + ] + }, + }, + "snapshot_properties": { + "chain_name": "chain_name_value", + "guest_flush": True, + "labels": {}, + "storage_locations": [ + "storage_locations_value1", + "storage_locations_value2", + ], + }, + }, + "status": "status_value", + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_flattened(): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # get arguments that satisfy an http rule for this method + sample_request = { + "project": "sample1", + "region": "sample2", + "resource_policy": "sample3", + } + + # get truthy value for each flattened field + mock_args = dict( + project="project_value", + region="region_value", + resource_policy="resource_policy_value", + resource_policy_resource=compute.ResourcePolicy( + creation_timestamp="creation_timestamp_value" + ), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = compute.Operation.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/compute/v1/projects/{project}/regions/{region}/resourcePolicies/{resource_policy}" + % client.transport._host, + args[1], + ) + + +def test_patch_rest_flattened_error(transport: str = "rest"): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.patch( + compute.PatchResourcePolicyRequest(), + project="project_value", + region="region_value", + resource_policy="resource_policy_value", + resource_policy_resource=compute.ResourcePolicy( + creation_timestamp="creation_timestamp_value" + ), + ) + + +def test_patch_rest_error(): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + compute.PatchResourcePolicyRequest, + dict, + ], +) +def test_patch_unary_rest(request_type): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "project": "sample1", + "region": "sample2", + "resource_policy": "sample3", + } + request_init["resource_policy_resource"] = { + "creation_timestamp": "creation_timestamp_value", + "description": "description_value", + "disk_consistency_group_policy": {}, + "group_placement_policy": { + "availability_domain_count": 2650, + "collocation": "collocation_value", + "vm_count": 875, + }, + "id": 205, + "instance_schedule_policy": { + "expiration_time": "expiration_time_value", + "start_time": "start_time_value", + "time_zone": "time_zone_value", + "vm_start_schedule": {"schedule": "schedule_value"}, + "vm_stop_schedule": {}, + }, + "kind": "kind_value", + "name": "name_value", + "region": "region_value", + "resource_status": { + "instance_schedule_policy": { + "last_run_start_time": "last_run_start_time_value", + "next_run_start_time": "next_run_start_time_value", + } + }, + "self_link": "self_link_value", + "snapshot_schedule_policy": { + "retention_policy": { + "max_retention_days": 1933, + "on_source_disk_delete": "on_source_disk_delete_value", + }, + "schedule": { + "daily_schedule": { + "days_in_cycle": 1366, + "duration": "duration_value", + "start_time": "start_time_value", + }, + "hourly_schedule": { + "duration": "duration_value", + "hours_in_cycle": 1494, + "start_time": "start_time_value", + }, + "weekly_schedule": { + "day_of_weeks": [ + { + "day": "day_value", + "duration": "duration_value", + "start_time": "start_time_value", + } + ] + }, + }, + "snapshot_properties": { + "chain_name": "chain_name_value", + "guest_flush": True, + "labels": {}, + "storage_locations": [ + "storage_locations_value1", + "storage_locations_value2", + ], + }, + }, + "status": "status_value", + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id="client_operation_id_value", + creation_timestamp="creation_timestamp_value", + description="description_value", + end_time="end_time_value", + http_error_message="http_error_message_value", + http_error_status_code=2374, + id=205, + insert_time="insert_time_value", + kind="kind_value", + name="name_value", + operation_group_id="operation_group_id_value", + operation_type="operation_type_value", + progress=885, + region="region_value", + self_link="self_link_value", + start_time="start_time_value", + status=compute.Operation.Status.DONE, + status_message="status_message_value", + target_id=947, + target_link="target_link_value", + user="user_value", + zone="zone_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = compute.Operation.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.patch_unary(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Operation) + + +def test_patch_unary_rest_required_fields( + request_type=compute.PatchResourcePolicyRequest, +): + transport_class = transports.ResourcePoliciesRestTransport + + request_init = {} + request_init["project"] = "" + request_init["region"] = "" + request_init["resource_policy"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).patch._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["project"] = "project_value" + jsonified_request["region"] = "region_value" + jsonified_request["resourcePolicy"] = "resource_policy_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).patch._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "request_id", + "update_mask", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "project" in jsonified_request + assert jsonified_request["project"] == "project_value" + assert "region" in jsonified_request + assert jsonified_request["region"] == "region_value" + assert "resourcePolicy" in jsonified_request + assert jsonified_request["resourcePolicy"] == "resource_policy_value" + + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. 
+ with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = compute.Operation.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.patch_unary(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_patch_unary_rest_unset_required_fields(): + transport = transports.ResourcePoliciesRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.patch._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "requestId", + "updateMask", + ) + ) + & set( + ( + "project", + "region", + "resourcePolicy", + "resourcePolicyResource", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_patch_unary_rest_interceptors(null_interceptor): + transport = transports.ResourcePoliciesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ResourcePoliciesRestInterceptor(), + ) + client = ResourcePoliciesClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ResourcePoliciesRestInterceptor, "post_patch" + ) as post, mock.patch.object( + transports.ResourcePoliciesRestInterceptor, "pre_patch" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = compute.PatchResourcePolicyRequest.pb( + compute.PatchResourcePolicyRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = compute.Operation.to_json(compute.Operation()) + + request = compute.PatchResourcePolicyRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = compute.Operation() + + client.patch_unary( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_patch_unary_rest_bad_request( + transport: str = "rest", request_type=compute.PatchResourcePolicyRequest +): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "project": "sample1", + "region": "sample2", + "resource_policy": "sample3", + } + request_init["resource_policy_resource"] = { + "creation_timestamp": "creation_timestamp_value", + "description": "description_value", + "disk_consistency_group_policy": {}, + "group_placement_policy": { + "availability_domain_count": 2650, + "collocation": "collocation_value", + "vm_count": 875, + }, + "id": 205, + "instance_schedule_policy": { + 
"expiration_time": "expiration_time_value", + "start_time": "start_time_value", + "time_zone": "time_zone_value", + "vm_start_schedule": {"schedule": "schedule_value"}, + "vm_stop_schedule": {}, + }, + "kind": "kind_value", + "name": "name_value", + "region": "region_value", + "resource_status": { + "instance_schedule_policy": { + "last_run_start_time": "last_run_start_time_value", + "next_run_start_time": "next_run_start_time_value", + } + }, + "self_link": "self_link_value", + "snapshot_schedule_policy": { + "retention_policy": { + "max_retention_days": 1933, + "on_source_disk_delete": "on_source_disk_delete_value", + }, + "schedule": { + "daily_schedule": { + "days_in_cycle": 1366, + "duration": "duration_value", + "start_time": "start_time_value", + }, + "hourly_schedule": { + "duration": "duration_value", + "hours_in_cycle": 1494, + "start_time": "start_time_value", + }, + "weekly_schedule": { + "day_of_weeks": [ + { + "day": "day_value", + "duration": "duration_value", + "start_time": "start_time_value", + } + ] + }, + }, + "snapshot_properties": { + "chain_name": "chain_name_value", + "guest_flush": True, + "labels": {}, + "storage_locations": [ + "storage_locations_value1", + "storage_locations_value2", + ], + }, + }, + "status": "status_value", + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch_unary(request) + + +def test_patch_unary_rest_flattened(): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # get arguments that satisfy an http rule for this method + sample_request = { + "project": "sample1", + "region": "sample2", + "resource_policy": "sample3", + } + + # get truthy value for each flattened field + mock_args = dict( + project="project_value", + region="region_value", + resource_policy="resource_policy_value", + resource_policy_resource=compute.ResourcePolicy( + creation_timestamp="creation_timestamp_value" + ), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = compute.Operation.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.patch_unary(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/compute/v1/projects/{project}/regions/{region}/resourcePolicies/{resource_policy}" + % client.transport._host, + args[1], + ) + + +def test_patch_unary_rest_flattened_error(transport: str = "rest"): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch_unary( + compute.PatchResourcePolicyRequest(), + project="project_value", + region="region_value", + resource_policy="resource_policy_value", + resource_policy_resource=compute.ResourcePolicy( + creation_timestamp="creation_timestamp_value" + ), + ) + + +def test_patch_unary_rest_error(): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + @pytest.mark.parametrize( "request_type", [ @@ -4362,6 +5320,7 @@ def test_resource_policies_base_transport(): "get_iam_policy", "insert", "list", + "patch", "set_iam_policy", "test_iam_permissions", ) @@ -4519,6 +5478,9 @@ def test_resource_policies_client_transport_session_collision(transport_name): session1 = client1.transport.list._session session2 = client2.transport.list._session assert session1 != session2 + session1 = client1.transport.patch._session + session2 = client2.transport.patch._session + assert session1 != session2 session1 = client1.transport.set_iam_policy._session session2 = client2.transport.set_iam_policy._session assert session1 != session2 diff --git a/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_service_attachments.py b/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_service_attachments.py index 83b59ab400a3..074f6b74ea8a 100644 --- a/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_service_attachments.py +++ b/packages/google-cloud-compute/tests/unit/gapic/compute_v1/test_service_attachments.py @@ -2255,6 +2255,7 @@ def test_insert_rest(request_type): request_init["service_attachment_resource"] = { "connected_endpoints": [ { + "consumer_network": "consumer_network_value", "endpoint": "endpoint_value", "psc_connection_id": 1793, "status": "status_value", @@ -2524,6 +2525,7 @@ def test_insert_rest_bad_request( request_init["service_attachment_resource"] = { "connected_endpoints": [ { + "consumer_network": "consumer_network_value", "endpoint": "endpoint_value", "psc_connection_id": 1793, "status": "status_value", @@ -2592,7 +2594,7 @@ def test_insert_rest_flattened(): service_attachment_resource=compute.ServiceAttachment( connected_endpoints=[ compute.ServiceAttachmentConnectedEndpoint( - endpoint="endpoint_value" + consumer_network="consumer_network_value" ) ] ), @@ -2636,7 +2638,7 @@ def test_insert_rest_flattened_error(transport: str = "rest"): service_attachment_resource=compute.ServiceAttachment( connected_endpoints=[ compute.ServiceAttachmentConnectedEndpoint( - endpoint="endpoint_value" + consumer_network="consumer_network_value" ) ] ), @@ -2667,6 +2669,7 @@ def test_insert_unary_rest(request_type): request_init["service_attachment_resource"] = { "connected_endpoints": [ { + "consumer_network": "consumer_network_value", "endpoint": "endpoint_value", "psc_connection_id": 1793, "status": "status_value", @@ -2914,6 +2917,7 @@ def test_insert_unary_rest_bad_request( request_init["service_attachment_resource"] = { "connected_endpoints": [ { + "consumer_network": 
"consumer_network_value", "endpoint": "endpoint_value", "psc_connection_id": 1793, "status": "status_value", @@ -2982,7 +2986,7 @@ def test_insert_unary_rest_flattened(): service_attachment_resource=compute.ServiceAttachment( connected_endpoints=[ compute.ServiceAttachmentConnectedEndpoint( - endpoint="endpoint_value" + consumer_network="consumer_network_value" ) ] ), @@ -3026,7 +3030,7 @@ def test_insert_unary_rest_flattened_error(transport: str = "rest"): service_attachment_resource=compute.ServiceAttachment( connected_endpoints=[ compute.ServiceAttachmentConnectedEndpoint( - endpoint="endpoint_value" + consumer_network="consumer_network_value" ) ] ), @@ -3420,6 +3424,7 @@ def test_patch_rest(request_type): request_init["service_attachment_resource"] = { "connected_endpoints": [ { + "consumer_network": "consumer_network_value", "endpoint": "endpoint_value", "psc_connection_id": 1793, "status": "status_value", @@ -3696,6 +3701,7 @@ def test_patch_rest_bad_request( request_init["service_attachment_resource"] = { "connected_endpoints": [ { + "consumer_network": "consumer_network_value", "endpoint": "endpoint_value", "psc_connection_id": 1793, "status": "status_value", @@ -3769,7 +3775,7 @@ def test_patch_rest_flattened(): service_attachment_resource=compute.ServiceAttachment( connected_endpoints=[ compute.ServiceAttachmentConnectedEndpoint( - endpoint="endpoint_value" + consumer_network="consumer_network_value" ) ] ), @@ -3814,7 +3820,7 @@ def test_patch_rest_flattened_error(transport: str = "rest"): service_attachment_resource=compute.ServiceAttachment( connected_endpoints=[ compute.ServiceAttachmentConnectedEndpoint( - endpoint="endpoint_value" + consumer_network="consumer_network_value" ) ] ), @@ -3849,6 +3855,7 @@ def test_patch_unary_rest(request_type): request_init["service_attachment_resource"] = { "connected_endpoints": [ { + "consumer_network": "consumer_network_value", "endpoint": "endpoint_value", "psc_connection_id": 1793, "status": "status_value", @@ -4105,6 +4112,7 @@ def test_patch_unary_rest_bad_request( request_init["service_attachment_resource"] = { "connected_endpoints": [ { + "consumer_network": "consumer_network_value", "endpoint": "endpoint_value", "psc_connection_id": 1793, "status": "status_value", @@ -4178,7 +4186,7 @@ def test_patch_unary_rest_flattened(): service_attachment_resource=compute.ServiceAttachment( connected_endpoints=[ compute.ServiceAttachmentConnectedEndpoint( - endpoint="endpoint_value" + consumer_network="consumer_network_value" ) ] ), @@ -4223,7 +4231,7 @@ def test_patch_unary_rest_flattened_error(transport: str = "rest"): service_attachment_resource=compute.ServiceAttachment( connected_endpoints=[ compute.ServiceAttachmentConnectedEndpoint( - endpoint="endpoint_value" + consumer_network="consumer_network_value" ) ] ),