Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

image tag update for release #114

Merged
merged 4 commits on Nov 7, 2018 (base and head branch names were lost in page extraction)
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion bootstrapper.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@ spec:
spec:
containers:
- name: deploy
image: gcr.io/ml-pipeline/bootstrapper:0.0.42
image: gcr.io/ml-pipeline/bootstrapper:0.1.0
imagePullPolicy: 'Always'
# Additional parameter available:
args: [
Expand Down
2 changes: 1 addition & 1 deletion components/kubeflow/launcher/kubeflow_tfjob_launcher_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
def kubeflow_tfjob_launcher_op(container_image, command, number_of_workers: int, number_of_parameter_servers: int, tfjob_timeout_minutes: int, output_dir=None, step_name='TFJob-launcher'):
return dsl.ContainerOp(
name = step_name,
image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf:0.0.42', #TODO: Update the name in next release.
(review thread on the line above by gaoning777 — marked as resolved)
image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf:0.1.0',
arguments = [
'--workers', number_of_workers,
'--pss', number_of_parameter_servers,
Expand Down
6 changes: 3 additions & 3 deletions components/kubeflow/launcher/train.template.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ spec:
spec:
containers:
- name: tensorflow
image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:0.0.42
image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:0.1.0
command:
- python
- -m
Expand All @@ -38,7 +38,7 @@ spec:
spec:
containers:
- name: tensorflow
image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:0.0.42
image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:0.1.0
command:
- python
- -m
Expand All @@ -50,7 +50,7 @@ spec:
spec:
containers:
- name: tensorflow
image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:0.0.42
image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:0.1.0
command:
- python
- -m
Expand Down
8 changes: 4 additions & 4 deletions ml-pipeline/ml-pipeline/prototypes/ml-pipeline.jsonnet
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,10 @@
// @shortDescription ML pipeline
// @param name string Name to give to each of the components
// @optionalParam namespace string default Namespace
// @optionalParam api_image string gcr.io/ml-pipeline/api-server:0.0.42 API docker image
// @optionalParam scheduledworkflow_image string gcr.io/ml-pipeline/scheduledworkflow:0.0.42 schedule workflow docker image
// @optionalParam persistenceagent_image string gcr.io/ml-pipeline/persistenceagent:0.0.42 persistence agent docker image
// @optionalParam ui_image string gcr.io/ml-pipeline/frontend:0.0.42 UI docker image
// @optionalParam api_image string gcr.io/ml-pipeline/api-server:0.1.0 API docker image
// @optionalParam scheduledworkflow_image string gcr.io/ml-pipeline/scheduledworkflow:0.1.0 schedule workflow docker image
// @optionalParam persistenceagent_image string gcr.io/ml-pipeline/persistenceagent:0.1.0 persistence agent docker image
// @optionalParam ui_image string gcr.io/ml-pipeline/frontend:0.1.0 UI docker image
// @optionalParam deploy_argo string false flag to deploy argo
// @optionalParam report_usage string false flag to report usage

Expand Down
8 changes: 4 additions & 4 deletions samples/kubeflow-tf/kubeflow-training-classification.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@
def dataflow_tf_transform_op(train_data: 'GcsUri', evaluation_data: 'GcsUri', schema: 'GcsUri[text/json]', project: 'GcpProject', preprocess_mode, preprocess_module: 'GcsUri[text/code/python]', transform_output: 'GcsUri[Directory]', step_name='preprocess'):
return dsl.ContainerOp(
name = step_name,
image = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tft:0.0.42',
image = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tft:0.1.0',
arguments = [
'--train', train_data,
'--eval', evaluation_data,
Expand All @@ -37,7 +37,7 @@ def dataflow_tf_transform_op(train_data: 'GcsUri', evaluation_data: 'GcsUri', sc
def kubeflow_tf_training_op(transformed_data_dir, schema: 'GcsUri[text/json]', learning_rate: float, hidden_layer_size: int, steps: int, target, preprocess_module: 'GcsUri[text/code/python]', training_output: 'GcsUri[Directory]', step_name='training'):
return dsl.ContainerOp(
name = step_name,
image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:0.0.42',
image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:0.1.0',
arguments = [
'--transformed-data-dir', transformed_data_dir,
'--schema', schema,
Expand All @@ -54,7 +54,7 @@ def kubeflow_tf_training_op(transformed_data_dir, schema: 'GcsUri[text/json]', l
def dataflow_tf_predict_op(evaluation_data: 'GcsUri', schema: 'GcsUri[text/json]', target: str, model: 'TensorFlow model', predict_mode, project: 'GcpProject', prediction_output: 'GcsUri', step_name='prediction'):
return dsl.ContainerOp(
name = step_name,
image = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tf-predict:0.0.42',
image = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tf-predict:0.1.0',
arguments = [
'--data', evaluation_data,
'--schema', schema,
Expand All @@ -70,7 +70,7 @@ def dataflow_tf_predict_op(evaluation_data: 'GcsUri', schema: 'GcsUri[text/json]
def confusion_matrix_op(predictions, output, step_name='confusionmatrix'):
return dsl.ContainerOp(
name = step_name,
image = 'gcr.io/ml-pipeline/ml-pipeline-local-confusion-matrix:0.0.42',
image = 'gcr.io/ml-pipeline/ml-pipeline-local-confusion-matrix:0.1.0',
arguments = [
'--predictions', predictions,
'--output', output,
Expand Down
12 changes: 6 additions & 6 deletions samples/tfx/taxi-cab-classification-pipeline.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@
def dataflow_tf_data_validation_op(inference_data: 'GcsUri', validation_data: 'GcsUri', column_names: 'GcsUri[text/json]', key_columns, project: 'GcpProject', mode, validation_output: 'GcsUri[Directory]', step_name='validation'):
return dsl.ContainerOp(
name = step_name,
image = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tfdv:dev',
image = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tfdv:0.1.0',
arguments = [
'--csv-data-for-inference', inference_data,
'--csv-data-to-validate', validation_data,
Expand All @@ -39,7 +39,7 @@ def dataflow_tf_data_validation_op(inference_data: 'GcsUri', validation_data: 'G
def dataflow_tf_transform_op(train_data: 'GcsUri', evaluation_data: 'GcsUri', schema: 'GcsUri[text/json]', project: 'GcpProject', preprocess_mode, preprocess_module: 'GcsUri[text/code/python]', transform_output: 'GcsUri[Directory]', step_name='preprocess'):
return dsl.ContainerOp(
name = step_name,
image = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tft:0.0.42',
image = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tft:0.1.0',
arguments = [
'--train', train_data,
'--eval', evaluation_data,
Expand All @@ -56,7 +56,7 @@ def dataflow_tf_transform_op(train_data: 'GcsUri', evaluation_data: 'GcsUri', sc
def tf_train_op(transformed_data_dir, schema: 'GcsUri[text/json]', learning_rate: float, hidden_layer_size: int, steps: int, target: str, preprocess_module: 'GcsUri[text/code/python]', training_output: 'GcsUri[Directory]', step_name='training'):
return dsl.ContainerOp(
name = step_name,
image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:0.0.42',
image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:0.1.0',
arguments = [
'--transformed-data-dir', transformed_data_dir,
'--schema', schema,
Expand All @@ -73,7 +73,7 @@ def tf_train_op(transformed_data_dir, schema: 'GcsUri[text/json]', learning_rate
def dataflow_tf_model_analyze_op(model: 'TensorFlow model', evaluation_data: 'GcsUri', schema: 'GcsUri[text/json]', project: 'GcpProject', analyze_mode, analyze_slice_column, analysis_output: 'GcsUri', step_name='analysis'):
return dsl.ContainerOp(
name = step_name,
image = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tfma:0.0.42',
image = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tfma:0.1.0',
arguments = [
'--model', model,
'--eval', evaluation_data,
Expand All @@ -90,7 +90,7 @@ def dataflow_tf_model_analyze_op(model: 'TensorFlow model', evaluation_data: 'Gc
def dataflow_tf_predict_op(evaluation_data: 'GcsUri', schema: 'GcsUri[text/json]', target: str, model: 'TensorFlow model', predict_mode, project: 'GcpProject', prediction_output: 'GcsUri', step_name='prediction'):
return dsl.ContainerOp(
name = step_name,
image = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tf-predict:0.0.42',
image = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tf-predict:0.1.0',
arguments = [
'--data', evaluation_data,
'--schema', schema,
Expand All @@ -106,7 +106,7 @@ def dataflow_tf_predict_op(evaluation_data: 'GcsUri', schema: 'GcsUri[text/json]
def kubeflow_deploy_op(model: 'TensorFlow model', tf_server_name, step_name='deploy'):
return dsl.ContainerOp(
name = step_name,
image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-deployer:dev', #TODO: change the tag to the release versions when new releases are built with the updated image
image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-deployer:0.1.0',
arguments = [
'--model-path', model,
'--server-name', tf_server_name
Expand Down
16 changes: 8 additions & 8 deletions samples/xgboost-spark/xgboost-training-cm.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ class CreateClusterOp(dsl.ContainerOp):
def __init__(self, name, project, region, staging):
super(CreateClusterOp, self).__init__(
name=name,
image='gcr.io/ml-pipeline/ml-pipeline-dataproc-create-cluster:0.0.42',
image='gcr.io/ml-pipeline/ml-pipeline-dataproc-create-cluster:0.1.0',
arguments=[
'--project', project,
'--region', region,
Expand All @@ -40,7 +40,7 @@ class DeleteClusterOp(dsl.ContainerOp):
def __init__(self, name, project, region):
super(DeleteClusterOp, self).__init__(
name=name,
image='gcr.io/ml-pipeline/ml-pipeline-dataproc-delete-cluster:0.0.42',
image='gcr.io/ml-pipeline/ml-pipeline-dataproc-delete-cluster:0.1.0',
arguments=[
'--project', project,
'--region', region,
Expand All @@ -54,7 +54,7 @@ class AnalyzeOp(dsl.ContainerOp):
def __init__(self, name, project, region, cluster_name, schema, train_data, output):
super(AnalyzeOp, self).__init__(
name=name,
image='gcr.io/ml-pipeline/ml-pipeline-dataproc-analyze:0.0.42',
image='gcr.io/ml-pipeline/ml-pipeline-dataproc-analyze:0.1.0',
arguments=[
'--project', project,
'--region', region,
Expand All @@ -72,7 +72,7 @@ def __init__(self, name, project, region, cluster_name, train_data, eval_data,
target, analysis, output):
super(TransformOp, self).__init__(
name=name,
image='gcr.io/ml-pipeline/ml-pipeline-dataproc-transform:0.0.42',
image='gcr.io/ml-pipeline/ml-pipeline-dataproc-transform:0.1.0',
arguments=[
'--project', project,
'--region', region,
Expand All @@ -97,7 +97,7 @@ def __init__(self, name, project, region, cluster_name, train_data, eval_data,

super(TrainerOp, self).__init__(
name=name,
image='gcr.io/ml-pipeline/ml-pipeline-dataproc-train:0.0.42',
image='gcr.io/ml-pipeline/ml-pipeline-dataproc-train:0.1.0',
arguments=[
'--project', project,
'--region', region,
Expand All @@ -120,7 +120,7 @@ class PredictOp(dsl.ContainerOp):
def __init__(self, name, project, region, cluster_name, data, model, target, analysis, output):
super(PredictOp, self).__init__(
name=name,
image='gcr.io/ml-pipeline/ml-pipeline-dataproc-predict:0.0.42',
image='gcr.io/ml-pipeline/ml-pipeline-dataproc-predict:0.1.0',
arguments=[
'--project', project,
'--region', region,
Expand All @@ -140,7 +140,7 @@ class ConfusionMatrixOp(dsl.ContainerOp):
def __init__(self, name, predictions, output):
super(ConfusionMatrixOp, self).__init__(
name=name,
image='gcr.io/ml-pipeline/ml-pipeline-local-confusion-matrix:0.0.42',
image='gcr.io/ml-pipeline/ml-pipeline-local-confusion-matrix:0.1.0',
arguments=[
'--output', output,
'--predictions', predictions
Expand All @@ -152,7 +152,7 @@ class RocOp(dsl.ContainerOp):
def __init__(self, name, predictions, trueclass, output):
super(RocOp, self).__init__(
name=name,
image='gcr.io/ml-pipeline/ml-pipeline-local-roc:0.0.42',
image='gcr.io/ml-pipeline/ml-pipeline-local-roc:0.1.0',
arguments=[
'--output', output,
'--predictions', predictions,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
def kubeflow_tfjob_launcher_op(container_image, command, number_of_workers: int, number_of_parameter_servers: int, tfjob_timeout_minutes: int, output_dir=None, step_name='TFJob-launcher'):
return dsl.ContainerOp(
name = step_name,
image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf:0.0.42', #TODO: Update the name in next release.
image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf:0.1.0',
arguments = [
'--workers', number_of_workers,
'--pss', number_of_parameter_servers,
Expand Down
2 changes: 1 addition & 1 deletion uninstaller.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ spec:
spec:
containers:
- name: uninstaller
image: gcr.io/ml-pipeline/bootstrapper:0.0.42
image: gcr.io/ml-pipeline/bootstrapper:0.1.0
imagePullPolicy: 'Always'
# Additional parameter available:
args: [
Expand Down