diff --git a/docs/readthedocs/source/doc/PPML/QuickStart/tpc-h_with_sparksql_on_k8s.md b/docs/readthedocs/source/doc/PPML/QuickStart/tpc-h_with_sparksql_on_k8s.md
index 86fbe2ebf0e..51bc3e0d1f4 100644
--- a/docs/readthedocs/source/doc/PPML/QuickStart/tpc-h_with_sparksql_on_k8s.md
+++ b/docs/readthedocs/source/doc/PPML/QuickStart/tpc-h_with_sparksql_on_k8s.md
@@ -28,7 +28,7 @@ Generate input data with size ~100GB (user can adjust data size to need):
 ### Deploy PPML TPC-H on Kubernetes ###
 1. Pull docker image
 ```
-sudo docker pull intelanalytics/bigdl-ppml-trusted-big-data-ml-python-graphene:0.14.0-SNAPSHOT
+sudo docker pull intelanalytics/bigdl-ppml-trusted-big-data-ml-python-graphene:2.1.0-SNAPSHOT
 ```
 2. Prepare SGX keys, make sure keys and tpch-spark can be accessed on each K8S node
 3. Start a bigdl-ppml enabled Spark K8S client container with configured local IP, key, tpch and kuberconfig path
@@ -38,7 +38,7 @@ export DATA_PATH=/root/zoo-tutorials/tpch-spark
 export KEYS_PATH=/root/keys
 export KUBERCONFIG_PATH=/root/kuberconfig
 export LOCAL_IP=$local_ip
-export DOCKER_IMAGE=intelanalytics/bigdl-ppml-trusted-big-data-ml-python-graphene:0.14.0-SNAPSHOT
+export DOCKER_IMAGE=intelanalytics/bigdl-ppml-trusted-big-data-ml-python-graphene:2.1.0-SNAPSHOT
 sudo docker run -itd \
 --privileged \
 --net=host \
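The change bumps the PPML docker image tag from `0.14.0-SNAPSHOT` to `2.1.0-SNAPSHOT` in both the `docker pull` step and the `DOCKER_IMAGE` variable of the quickstart. A minimal sketch for double-checking the bump, not part of the patch and purely an assumption about how one might verify it locally: grep the PPML docs tree for any remaining references to the old tag and confirm the new image can be pulled.

```
# Hypothetical verification, run from the repository root (not part of the patch).
# 1) Make sure no stale 0.14.0-SNAPSHOT image references remain in the PPML docs.
grep -rn "bigdl-ppml-trusted-big-data-ml-python-graphene:0.14.0-SNAPSHOT" \
  docs/readthedocs/source/doc/PPML/ || echo "No stale image tags found."

# 2) Confirm the updated tag is pullable and present locally.
sudo docker pull intelanalytics/bigdl-ppml-trusted-big-data-ml-python-graphene:2.1.0-SNAPSHOT
sudo docker images | grep bigdl-ppml-trusted-big-data-ml-python-graphene
```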