feat: add enable_private_service_connect field to Endpoint (#238)
feat: add id field to DeployedModel
feat: add service_attachment field to PrivateEndpoints
feat: add endpoint_id to CreateEndpointRequest and method signature to CreateEndpoint
feat: add method signature to CreateFeatureStore, CreateEntityType, CreateFeature
feat: add network and enable_private_service_connect to IndexEndpoint
feat: add service_attachment to IndexPrivateEndpoints
feat: add stratified_split field to training_pipeline InputDataConfig
fix: remove invalid resource annotations in LineageSubgraph

- [ ] Regenerate this pull request now.

PiperOrigin-RevId: 413686247

Source-Link: googleapis/googleapis@244a89d

Source-Link: googleapis/googleapis-gen@c485e44
Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYzQ4NWU0NGExYjJmZWY1MTZlOWJjYTM2NTE0ZDUwY2ViZDVlYTUxZiJ9
gcf-owl-bot[bot] authored Dec 10, 2021
1 parent f35dc44 commit 352afaa
Showing 62 changed files with 15,741 additions and 12,152 deletions.
9 changes: 2 additions & 7 deletions packages/google-cloud-aiplatform/linkinator.config.json
@@ -3,13 +3,8 @@
"skip": [
"https://codecov.io/gh/googleapis/",
"www.googleapis.com",
"img.shields.io",
"https://console.cloud.google.com/cloudshell"
"img.shields.io"
],
"silent": true,
"concurrency": 5,
"retry": true,
"retryErrors": true,
"retryErrorsCount": 5,
"retryErrorsJitter": 3000
"concurrency": 5
}
@@ -43,7 +43,7 @@ message Artifact {
// Unspecified state for the Artifact.
STATE_UNSPECIFIED = 0;

-  // A state used by systems like Vertex Pipelines to indicate that the
+  // A state used by systems like Vertex AI Pipelines to indicate that the
// underlying data item represented by this Artifact is being created.
PENDING = 1;

@@ -84,7 +84,7 @@ message Artifact {

// The state of this Artifact. This is a property of the Artifact, and does
// not imply or capture any ongoing process. This property is managed by
-  // clients (such as Vertex Pipelines), and the system does not prescribe
+  // clients (such as Vertex AI Pipelines), and the system does not prescribe
// or check the validity of state transitions.
State state = 13;
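
The state above is client-managed, so a producer (for example a pipeline step) has to record it explicitly. A minimal sketch against the generated v1beta1 Node.js client (@google-cloud/aiplatform); the project, metadata store, bucket path, and IDs below are placeholders, not values from this commit:

```ts
import {v1beta1} from '@google-cloud/aiplatform';

// Placeholder project/location/metadata-store values.
const parent =
  'projects/my-project/locations/us-central1/metadataStores/default';

export async function createPendingArtifact(): Promise<void> {
  const client = new v1beta1.MetadataServiceClient({
    apiEndpoint: 'us-central1-aiplatform.googleapis.com',
  });
  // The client, not the service, decides when this moves from PENDING to
  // LIVE; Vertex AI only stores the value.
  const [artifact] = await client.createArtifact({
    parent,
    artifactId: 'training-dataset-snapshot',
    artifact: {
      displayName: 'training-dataset-snapshot',
      uri: 'gs://my-bucket/datasets/2021-12-10/',
      state: 'PENDING',
    },
  });
  console.log('Created artifact', artifact.name);
}
```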

@@ -26,6 +26,7 @@ import "google/cloud/aiplatform/v1beta1/job_state.proto";
import "google/cloud/aiplatform/v1beta1/machine_resources.proto";
import "google/cloud/aiplatform/v1beta1/manual_batch_tuning_parameters.proto";
import "google/cloud/aiplatform/v1beta1/model_monitoring.proto";
import "google/cloud/aiplatform/v1beta1/unmanaged_container_model.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";
import "google/rpc/status.proto";
@@ -155,16 +156,19 @@ message BatchPredictionJob {
// Required. The user-defined name of this BatchPredictionJob.
string display_name = 2 [(google.api.field_behavior) = REQUIRED];

-  // Required. The name of the Model that produces the predictions via this job,
+  // The name of the Model resource that produces the predictions via this job,
// must share the same ancestor Location.
// Starting this job has no impact on any existing deployments of the Model
// and their resources.
-  string model = 3 [
-    (google.api.field_behavior) = REQUIRED,
-    (google.api.resource_reference) = {
-      type: "aiplatform.googleapis.com/Model"
-    }
-  ];
+  // Exactly one of model and unmanaged_container_model must be set.
+  string model = 3 [(google.api.resource_reference) = {
+                      type: "aiplatform.googleapis.com/Model"
+                    }];
+
+  // Contains model information necessary to perform batch prediction without
+  // requiring uploading to model registry.
+  // Exactly one of model and unmanaged_container_model must be set.
+  UnmanagedContainerModel unmanaged_container_model = 28;

// Required. Input configuration of the instances on which predictions are performed.
// The schema of any single instance may be specified via
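
Because exactly one of model and unmanaged_container_model may be set, a batch prediction job that skips the model registry would be built roughly like this with the generated v1beta1 Node.js client; the bucket paths, prebuilt container image, and machine shapes are placeholder assumptions:

```ts
import {v1beta1} from '@google-cloud/aiplatform';

export async function createBatchJobWithUnmanagedModel(): Promise<void> {
  const client = new v1beta1.JobServiceClient({
    apiEndpoint: 'us-central1-aiplatform.googleapis.com',
  });
  const [job] = await client.createBatchPredictionJob({
    parent: 'projects/my-project/locations/us-central1',
    batchPredictionJob: {
      displayName: 'nightly-scoring',
      // unmanagedContainerModel replaces `model`: the artifacts stay in
      // Cloud Storage and are never uploaded to the model registry.
      unmanagedContainerModel: {
        artifactUri: 'gs://my-bucket/model-artifacts/',
        containerSpec: {
          imageUri:
            'us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.0-24:latest',
        },
      },
      inputConfig: {
        instancesFormat: 'jsonl',
        gcsSource: {uris: ['gs://my-bucket/batch-inputs/*.jsonl']},
      },
      outputConfig: {
        predictionsFormat: 'jsonl',
        gcsDestination: {outputUriPrefix: 'gs://my-bucket/batch-outputs/'},
      },
      dedicatedResources: {
        machineSpec: {machineType: 'n1-standard-4'},
        startingReplicaCount: 1,
        maxReplicaCount: 2,
      },
    },
  });
  console.log('Created batch prediction job', job.name);
}
```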
@@ -131,8 +131,11 @@ message CustomJobSpec {
// Where {project} is a project number, as in `12345`, and {network} is a
// network name.
//
-  // Private services access must already be configured for the network. If left
-  // unspecified, the job is not peered with any network.
+  // To specify this field, you must have already [configured VPC Network
+  // Peering for Vertex
+  // AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering).
+  //
+  // If this field is left unspecified, the job is not peered with any network.
string network = 5 [(google.api.resource_reference) = {
type: "compute.googleapis.com/Network"
}];
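
For reference, the network value is passed on the job spec when creating a CustomJob. A sketch, assuming the generated v1beta1 Node.js client; the project number, VPC name, and training image are placeholders, and VPC Network Peering must already be configured as described above:

```ts
import {v1beta1} from '@google-cloud/aiplatform';

export async function createPeeredCustomJob(): Promise<void> {
  const client = new v1beta1.JobServiceClient({
    apiEndpoint: 'us-central1-aiplatform.googleapis.com',
  });
  const [customJob] = await client.createCustomJob({
    parent: 'projects/my-project/locations/us-central1',
    customJob: {
      displayName: 'trainer-on-peered-vpc',
      jobSpec: {
        workerPoolSpecs: [
          {
            machineSpec: {machineType: 'n1-standard-8'},
            replicaCount: 1,
            containerSpec: {imageUri: 'gcr.io/my-project/trainer:latest'},
          },
        ],
        // {project} is the project *number*, per the field comment above.
        network: 'projects/123456789012/global/networks/my-vpc',
      },
    },
  });
  console.log('Created custom job', customJob.name);
}
```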
@@ -20,6 +20,7 @@ import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/aiplatform/v1beta1/encryption_spec.proto";
import "google/cloud/aiplatform/v1beta1/explanation.proto";
import "google/cloud/aiplatform/v1beta1/io.proto";
import "google/cloud/aiplatform/v1beta1/machine_resources.proto";
import "google/cloud/aiplatform/v1beta1/model_deployment_monitoring_job.proto";
import "google/cloud/aiplatform/v1beta1/model_monitoring.proto";
@@ -93,20 +94,31 @@ message Endpoint {
EncryptionSpec encryption_spec = 10;

// The full name of the Google Compute Engine
-  // [network](/compute/docs/networks-and-firewalls#networks) to which the
-  // Endpoint should be peered.
+  // [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks)
+  // to which the Endpoint should be peered.
//
// Private services access must already be configured for the network. If left
// unspecified, the Endpoint is not peered with any network.
//
// Only one of the fields, [network][google.cloud.aiplatform.v1beta1.Endpoint.network] or
// [enable_private_service_connect][google.cloud.aiplatform.v1beta1.Endpoint.enable_private_service_connect],
// can be set.
//
// [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert):
-  // projects/{project}/global/networks/{network}.
-  // Where {project} is a project number, as in '12345', and {network} is
+  // `projects/{project}/global/networks/{network}`.
+  // Where `{project}` is a project number, as in `12345`, and `{network}` is
// network name.
string network = 13 [(google.api.resource_reference) = {
type: "compute.googleapis.com/Network"
}];

// If true, expose the Endpoint via private service connect.
//
// Only one of the fields, [network][google.cloud.aiplatform.v1beta1.Endpoint.network] or
// [enable_private_service_connect][google.cloud.aiplatform.v1beta1.Endpoint.enable_private_service_connect],
// can be set.
bool enable_private_service_connect = 17;

// Output only. Resource name of the Model Monitoring job associated with this Endpoint
// if monitoring is enabled by [CreateModelDeploymentMonitoringJob][].
// Format:
@@ -136,8 +148,11 @@ message DeployedModel {
AutomaticResources automatic_resources = 8;
}

-  // Output only. The ID of the DeployedModel.
-  string id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+  // Immutable. The ID of the DeployedModel. If not provided upon deployment, Vertex AI
+  // will generate a value for this ID.
+  //
+  // This value should be 1-10 characters, and valid characters are /[0-9]/.
+  string id = 1 [(google.api.field_behavior) = IMMUTABLE];

// Required. The name of the Model that this is the deployment of. Note that the Model
// may be in a different location than the DeployedModel's Endpoint.
@@ -195,8 +210,11 @@ message DeployedModel {
PrivateEndpoints private_endpoints = 14 [(google.api.field_behavior) = OUTPUT_ONLY];
}

-// PrivateEndpoints is used to provide paths for users to send
-// requests via private services access.
+// PrivateEndpoints proto is used to provide paths for users to send
+// requests privately.
+// To send request via private service access, use predict_http_uri,
+// explain_http_uri or health_http_uri. To send request via private service
+// connect, use service_attachment.
message PrivateEndpoints {
// Output only. Http(s) path to send prediction requests.
string predict_http_uri = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
@@ -206,4 +224,8 @@ message PrivateEndpoints {

// Output only. Http(s) path to send health check requests.
string health_http_uri = 3 [(google.api.field_behavior) = OUTPUT_ONLY];

// Output only. The name of the service attachment resource. Populated if private service
// connect is enabled.
string service_attachment = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
}
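
Tying the new fields together: a DeployedModel may now carry an explicit numeric id, and on a Private Service Connect endpoint the resulting service_attachment can be read back from private_endpoints. A rough sketch using the generated v1beta1 Node.js client, with placeholder resource names and machine shapes:

```ts
import {v1beta1} from '@google-cloud/aiplatform';

const client = new v1beta1.EndpointServiceClient({
  apiEndpoint: 'us-central1-aiplatform.googleapis.com',
});

export async function deployWithExplicitId(
  endpoint: string,
  model: string
): Promise<void> {
  // `id` is immutable and numeric (1-10 digits); omit it and Vertex AI
  // generates one.
  const [operation] = await client.deployModel({
    endpoint,
    deployedModel: {
      id: '1234567890',
      model,
      dedicatedResources: {
        machineSpec: {machineType: 'n1-standard-4'},
        minReplicaCount: 1,
      },
    },
    trafficSplit: {'1234567890': 100},
  });
  await operation.promise();

  // On a Private Service Connect endpoint, the new service_attachment field
  // is reported under the deployed model's private endpoints.
  const [refreshed] = await client.getEndpoint({name: endpoint});
  for (const dm of refreshed.deployedModels ?? []) {
    console.log(dm.id, dm.privateEndpoints?.serviceAttachment);
  }
}
```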
@@ -45,6 +45,7 @@ service EndpointService {
body: "endpoint"
};
option (google.api.method_signature) = "parent,endpoint";
option (google.api.method_signature) = "parent,endpoint,endpoint_id";
option (google.longrunning.operation_info) = {
response_type: "Endpoint"
metadata_type: "CreateEndpointOperationMetadata"
@@ -129,6 +130,16 @@ message CreateEndpointRequest {

// Required. The Endpoint to create.
Endpoint endpoint = 2 [(google.api.field_behavior) = REQUIRED];

// Immutable. The ID to use for endpoint, which will become the final
// component of the endpoint resource name.
// If not provided, Vertex AI will generate a value for this ID.
//
// This value should be 1-10 characters, and valid characters are /[0-9]/.
// When using HTTP/JSON, this field is populated based on a query string
// argument, such as `?endpoint_id=12345`. This is the fallback for fields
// that are not included in either the URI or the body.
string endpoint_id = 4 [(google.api.field_behavior) = IMMUTABLE];
}
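
Once these changes are regenerated into the client library, the new endpoint_id argument and the enable_private_service_connect flag would be supplied on the create call roughly as follows; this is a sketch, and the numeric ID and display name are placeholders:

```ts
import {v1beta1} from '@google-cloud/aiplatform';

export async function createPscEndpoint(): Promise<void> {
  const client = new v1beta1.EndpointServiceClient({
    apiEndpoint: 'us-central1-aiplatform.googleapis.com',
  });
  const [operation] = await client.createEndpoint({
    parent: 'projects/my-project/locations/us-central1',
    // Becomes the final segment of the endpoint resource name (1-10 digits);
    // over REST it travels as the ?endpoint_id=12345 query parameter.
    endpointId: '12345',
    endpoint: {
      displayName: 'psc-endpoint',
      // Mutually exclusive with `network`.
      enablePrivateServiceConnect: true,
    },
  });
  const [endpoint] = await operation.promise();
  console.log('Created', endpoint.name);
}
```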

// Runtime operation information for [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint].
@@ -71,7 +71,7 @@ message Execution {

// The state of this Execution. This is a property of the Execution, and does
// not imply or capture any ongoing process. This property is managed by
-  // clients (such as Vertex Pipelines) and the system does not prescribe
+  // clients (such as Vertex AI Pipelines) and the system does not prescribe
// or check the validity of state transitions.
State state = 6;

@@ -252,6 +252,14 @@ message IntegratedGradientsAttribution {
// noise can help improve the computed gradients. Refer to this paper for more
// details: https://arxiv.org/pdf/1706.03825.pdf
SmoothGradConfig smooth_grad_config = 2;

// Config for IG with blur baseline.
//
// When enabled, a linear path from the maximally blurred image to the input
// image is created. Using a blurred baseline instead of zero (black image) is
// motivated by the BlurIG approach explained here:
// https://arxiv.org/abs/2004.03383
BlurBaselineConfig blur_baseline_config = 3;
}

// An explanation method that redistributes Integrated Gradients
@@ -275,6 +283,14 @@ message XraiAttribution {
// noise can help improve the computed gradients. Refer to this paper for more
// details: https://arxiv.org/pdf/1706.03825.pdf
SmoothGradConfig smooth_grad_config = 2;

// Config for XRAI with blur baseline.
//
// When enabled, a linear path from the maximally blurred image to the input
// image is created. Using a blurred baseline instead of zero (black image) is
// motivated by the BlurIG approach explained here:
// https://arxiv.org/abs/2004.03383
BlurBaselineConfig blur_baseline_config = 3;
}

// Config for SmoothGrad approximation of gradients.
@@ -341,6 +357,20 @@ message FeatureNoiseSigma {
repeated NoiseSigmaForFeature noise_sigma = 1;
}

// Config for blur baseline.
//
// When enabled, a linear path from the maximally blurred image to the input
// image is created. Using a blurred baseline instead of zero (black image) is
// motivated by the BlurIG approach explained here:
// https://arxiv.org/abs/2004.03383
message BlurBaselineConfig {
// The standard deviation of the blur kernel for the blurred baseline. The
// same blurring parameter is used for both the height and the width
// dimension. If not set, the method defaults to the zero (i.e. black for
// images) baseline.
float max_blur_sigma = 1;
}
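
To show where BlurBaselineConfig sits, here is the shape of the explanation parameters only, typed with the generated v1beta1 interfaces; the step counts and sigma are arbitrary example values, and the surrounding ExplanationSpec metadata is omitted:

```ts
import {protos} from '@google-cloud/aiplatform';

type IExplanationParameters =
  protos.google.cloud.aiplatform.v1beta1.IExplanationParameters;

// Integrated Gradients with a blurred (rather than black) baseline image.
export const igWithBlurBaseline: IExplanationParameters = {
  integratedGradientsAttribution: {
    stepCount: 50,
    // Omitting blurBaselineConfig keeps the default zero (black) baseline.
    blurBaselineConfig: {maxBlurSigma: 20},
  },
};

// The same knob exists on XRAI.
export const xraiWithBlurBaseline: IExplanationParameters = {
  xraiAttribution: {
    stepCount: 50,
    blurBaselineConfig: {maxBlurSigma: 20},
  },
};
```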

// Similarity explainability that returns the nearest neighbors from the
// provided dataset.
message Similarity {
@@ -129,7 +129,7 @@ message Feature {
FeaturestoreMonitoringConfig monitoring_config = 9 [(google.api.field_behavior) = OPTIONAL];

// Output only. A list of historical [Snapshot
-  // Analysis][google.cloud.aiplatform.master.FeaturestoreMonitoringConfig.SnapshotAnalysis]
+  // Analysis][FeaturestoreMonitoringConfig.SnapshotAnalysis]
// stats requested by user, sorted by [FeatureStatsAnomaly.start_time][google.cloud.aiplatform.v1beta1.FeatureStatsAnomaly.start_time]
// descending.
repeated FeatureStatsAnomaly monitoring_stats = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
@@ -30,7 +30,9 @@ option java_package = "com.google.cloud.aiplatform.v1beta1";
option php_namespace = "Google\\Cloud\\AIPlatform\\V1beta1";
option ruby_package = "Google::Cloud::AIPlatform::V1beta1";

-// Featurestore configuration information on how the Featurestore is configured.
+// Vertex AI Feature Store provides a centralized repository for organizing,
+// storing, and serving ML features. The Featurestore is a top-level container
+// for your features and their values.
message Featurestore {
option (google.api.resource) = {
type: "aiplatform.googleapis.com/Featurestore"
@@ -178,7 +178,9 @@ message FeatureValue {
// Feature generation timestamp. Typically, it is provided by user at
// feature ingestion time. If not, feature store
// will use the system timestamp when the data is ingested into feature
-  // store.
+  // store. For streaming ingestion, the time, aligned by days, must be no
+  // older than five years (1825 days) and no later than one year (366 days)
+  // in the future.
google.protobuf.Timestamp generate_time = 1;
}

@@ -51,6 +51,7 @@ service FeaturestoreService {
body: "featurestore"
};
option (google.api.method_signature) = "parent,featurestore";
option (google.api.method_signature) = "parent,featurestore,featurestore_id";
option (google.longrunning.operation_info) = {
response_type: "Featurestore"
metadata_type: "CreateFeaturestoreOperationMetadata"
@@ -107,6 +108,7 @@ service FeaturestoreService {
body: "entity_type"
};
option (google.api.method_signature) = "parent,entity_type";
option (google.api.method_signature) = "parent,entity_type,entity_type_id";
option (google.longrunning.operation_info) = {
response_type: "EntityType"
metadata_type: "CreateEntityTypeOperationMetadata"
@@ -159,6 +161,7 @@ service FeaturestoreService {
body: "feature"
};
option (google.api.method_signature) = "parent,feature";
option (google.api.method_signature) = "parent,feature,feature_id";
option (google.longrunning.operation_info) = {
response_type: "Feature"
metadata_type: "CreateFeatureOperationMetadata"
@@ -608,18 +611,43 @@ message BatchReadFeatureValuesRequest {

// Request message for [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues].
message ExportFeatureValuesRequest {
-  // Describes exporting Feature values as of the snapshot timestamp.
+  // Describes exporting the latest Feature values of all entities of the
+  // EntityType between [start_time, snapshot_time].
message SnapshotExport {
// Exports Feature values as of this timestamp. If not set,
// retrieve values as of now. Timestamp, if present, must not have higher
// than millisecond precision.
google.protobuf.Timestamp snapshot_time = 1;

// Excludes Feature values with feature generation timestamp before this
// timestamp. If not set, retrieve oldest values kept in Feature Store.
// Timestamp, if present, must not have higher than millisecond precision.
google.protobuf.Timestamp start_time = 2;
}

// Describes exporting all historical Feature values of all entities of the
// EntityType between [start_time, end_time].
message FullExport {
// Excludes Feature values with feature generation timestamp before this
// timestamp. If not set, retrieve oldest values kept in Feature Store.
// Timestamp, if present, must not have higher than millisecond precision.
google.protobuf.Timestamp start_time = 2;

// Exports Feature values as of this timestamp. If not set,
// retrieve values as of now. Timestamp, if present, must not have higher
// than millisecond precision.
google.protobuf.Timestamp end_time = 1;
}

// Required. The mode in which Feature values are exported.
oneof mode {
-    // Exports Feature values of all entities of the EntityType as of a snapshot
-    // time.
+    // Exports the latest Feature values of all entities of the EntityType
+    // within a time range.
SnapshotExport snapshot_export = 3;

// Exports all historical values of all entities of the EntityType within a
// time range
FullExport full_export = 7;
}

// Required. The resource name of the EntityType from which to export Feature values.
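
Combining the new (parent, resource, resource_id) method signatures with the export modes above, a feature store round trip might be sketched as follows against the generated v1beta1 Node.js client; every ID, BigQuery table, and timestamp below is a placeholder:

```ts
import {v1beta1} from '@google-cloud/aiplatform';

export async function featurestoreRoundTrip(): Promise<void> {
  const client = new v1beta1.FeaturestoreServiceClient({
    apiEndpoint: 'us-central1-aiplatform.googleapis.com',
  });
  const parent = 'projects/my-project/locations/us-central1';

  // The extra *_id arguments mirror the new method signatures above.
  const [createFsOp] = await client.createFeaturestore({
    parent,
    featurestoreId: 'my_featurestore',
    featurestore: {onlineServingConfig: {fixedNodeCount: 1}},
  });
  const [featurestore] = await createFsOp.promise();

  const [createEtOp] = await client.createEntityType({
    parent: featurestore.name,
    entityTypeId: 'users',
    entityType: {description: 'Registered users'},
  });
  const [entityType] = await createEtOp.promise();

  const [createFOp] = await client.createFeature({
    parent: entityType.name,
    featureId: 'age',
    feature: {valueType: 'INT64'},
  });
  await createFOp.promise();

  // FullExport: every historical value in [start_time, end_time];
  // SnapshotExport would instead take snapshot_time (and now start_time).
  const [exportOp] = await client.exportFeatureValues({
    entityType: entityType.name,
    featureSelector: {idMatcher: {ids: ['*']}},
    destination: {
      bigqueryDestination: {
        outputUri: 'bq://my-project.my_dataset.users_export',
      },
    },
    fullExport: {
      startTime: {seconds: 1609459200}, // 2021-01-01T00:00:00Z
      endTime: {seconds: 1639094400}, // 2021-12-10T00:00:00Z
    },
  });
  await exportOp.promise();
}
```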
@@ -1141,15 +1169,15 @@ message UpdateFeaturestoreOperationMetadata {
GenericOperationMetadata generic_metadata = 1;
}

-// Details of operations that perform import feature values.
+// Details of operations that perform import Feature values.
message ImportFeatureValuesOperationMetadata {
-  // Operation metadata for Featurestore import feature values.
+  // Operation metadata for Featurestore import Feature values.
GenericOperationMetadata generic_metadata = 1;

// Number of entities that have been imported by the operation.
int64 imported_entity_count = 2;

-  // Number of feature values that have been imported by the operation.
+  // Number of Feature values that have been imported by the operation.
int64 imported_feature_value_count = 3;

// The number of rows in input source that weren't imported due to either