
Commit 000e0d6

chensuyue and yiliu30 authored and committed

CI FWs/Python version update (#1272)

Signed-off-by: chensuyue <suyue.chen@intel.com>
Co-authored-by: yiliu30 <yi4.liu@intel.com>
Signed-off-by: bmyrcha <bartosz.myrcha@intel.com>
1 parent 6da8fb2 commit 000e0d6

File tree

15 files changed: +49 / -36 lines

.azure-pipelines/docker/Dockerfile.devel

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-ARG UBUNTU_VER=20.04
+ARG UBUNTU_VER=22.04
 FROM ubuntu:${UBUNTU_VER} as devel
 
 # See http://bugs.python.org/issue19846

.azure-pipelines/docker/DockerfileCodeScan.devel

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-ARG UBUNTU_VER=20.04
+ARG UBUNTU_VER=22.04
 FROM ubuntu:${UBUNTU_VER} as devel
 
 # See http://bugs.python.org/issue19846

.azure-pipelines/docker/DockerfileWithNC.devel

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-ARG UBUNTU_VER=20.04
+ARG UBUNTU_VER=22.04
 FROM ubuntu:${UBUNTU_VER} as devel
 
 # See http://bugs.python.org/issue19846
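
All three CI Dockerfiles take the base image version through the same UBUNTU_VER build argument, so the 20.04 -> 22.04 bump is a one-line change per file. A minimal sketch (not part of this commit; the image tag is illustrative) of pinning the argument explicitly at build time:

# Sketch only: build the devel CI image with the new base pinned explicitly.
docker build -f .azure-pipelines/docker/Dockerfile.devel \
    --build-arg UBUNTU_VER=22.04 \
    -t neural-compressor-devel:ubuntu22.04 .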

.azure-pipelines/scripts/codeScan/pylint/pylint.sh

Lines changed: 5 additions & 3 deletions
@@ -20,6 +20,7 @@ apt-get install -y --no-install-recommends --fix-missing \
     build-essential
 
 pip install -r /neural-compressor/requirements.txt
+pip install cmake
 
 pip install torch==1.12.0 \
     horovod \
@@ -52,13 +53,14 @@ elif [ "${scan_module}" = "neural_insights" ]; then
 fi
 
 python -m pylint -f json --disable=R,C,W,E1129 --enable=line-too-long --max-line-length=120 --extension-pkg-whitelist=numpy --ignored-classes=TensorProto,NodeProto \
-    --ignored-modules=tensorflow,torch,torch.quantization,torch.tensor,torchvision,fairseq,mxnet,onnx,onnxruntime,intel_extension_for_pytorch,intel_extension_for_tensorflow,torchinfo /neural-compressor/${scan_module} \
-    >$log_dir/pylint.json
+    --ignored-modules=tensorflow,torch,torch.quantization,torch.tensor,torchvision,fairseq,mxnet,onnx,onnxruntime,intel_extension_for_pytorch,intel_extension_for_tensorflow,torchinfo,horovod,transformers \
+    /neural-compressor/${scan_module} > $log_dir/pylint.json
 
 exit_code=$?
 
 $BOLD_YELLOW && echo " ----------------- Current pylint cmd start --------------------------" && $RESET
-echo "python -m pylint -f json --disable=R,C,W,E1129 --enable=line-too-long --max-line-length=120 --extension-pkg-whitelist=numpy --ignored-classes=TensorProto,NodeProto --ignored-modules=tensorflow,torch,torch.quantization,torch.tensor,torchvision,fairseq,mxnet,onnx,onnxruntime,intel_extension_for_pytorch /neural-compressor/${scan_module} > $log_dir/pylint.json"
+echo "python -m pylint -f json --disable=R,C,W,E1129 --enable=line-too-long --max-line-length=120 --extension-pkg-whitelist=numpy --ignored-classes=TensorProto,NodeProto --ignored-modules=tensorflow,torch,torch.quantization,torch.tensor,torchvision,fairseq,mxnet,onnx,onnxruntime,intel_extension_for_pytorch,intel_extension_for_tensorflow,torchinfo,horovod,transformers
+    /neural-compressor/${scan_module}>$log_dir/pylint.json"
 $BOLD_YELLOW && echo " ----------------- Current pylint cmd end --------------------------" && $RESET
 
 $BOLD_YELLOW && echo " ----------------- Current log file output start --------------------------" && $RESET
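
For local debugging, the same lint step can be reproduced outside the pipeline. A minimal sketch, assuming the repository is checked out at ./neural-compressor and scan_module is neural_compressor (both assumptions, not taken from this commit):

# Sketch only: run the extended pylint command against a local checkout.
pip install pylint cmake
pip install -r neural-compressor/requirements.txt
python -m pylint -f json --disable=R,C,W,E1129 --enable=line-too-long --max-line-length=120 \
    --extension-pkg-whitelist=numpy --ignored-classes=TensorProto,NodeProto \
    --ignored-modules=tensorflow,torch,torch.quantization,torch.tensor,torchvision,fairseq,mxnet,onnx,onnxruntime,intel_extension_for_pytorch,intel_extension_for_tensorflow,torchinfo,horovod,transformers \
    neural-compressor/neural_compressor > pylint.json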

.azure-pipelines/scripts/models/env_setup.sh

Lines changed: 2 additions & 8 deletions
@@ -87,20 +87,14 @@ elif [[ "${framework}" == "pytorch" ]]; then
     pip install torch==${fwk_ver} -f https://download.pytorch.org/whl/torch_stable.html
     pip install torchvision==${torch_vision_ver} -f https://download.pytorch.org/whl/torch_stable.html
 elif [[ "${framework}" == "onnxrt" ]]; then
-    pip install onnx==1.14.0
+    pip install onnx==1.14.1
     pip install onnxruntime==${fwk_ver}
 elif [[ "${framework}" == "mxnet" ]]; then
     pip install numpy==1.23.5
     echo "re-install pycocotools resolve the issue with numpy..."
     pip uninstall pycocotools -y
     pip install --no-cache-dir pycocotools
-    if [[ "${fwk_ver}" == "1.7.0" ]]; then
-        pip install mxnet==${fwk_ver}.post2
-    elif [[ "${fwk_ver}" == "1.6.0" ]]; then
-        pip install mxnet-mkl==${mxnet_version}
-    else
-        pip install mxnet==${fwk_ver}
-    fi
+    pip install mxnet==${fwk_ver}
 fi
 
 if [ -f "requirements.txt" ]; then

.azure-pipelines/scripts/models/run_onnxrt_models_trigger.sh

Lines changed: 1 addition & 1 deletion
@@ -22,7 +22,7 @@ do
 done
 
 FRAMEWORK="onnxrt"
-FRAMEWORK_VERSION="1.15.0"
+FRAMEWORK_VERSION="1.15.1"
 
 inc_new_api=false
 # ======== set up config for onnxrt models ========

.azure-pipelines/scripts/models/run_pytorch_models_trigger.sh

Lines changed: 2 additions & 2 deletions
@@ -22,8 +22,8 @@ do
 done
 
 FRAMEWORK="pytorch"
-FRAMEWORK_VERSION="2.0.0+cpu"
-TORCH_VISION_VERSION="0.15.1+cpu"
+FRAMEWORK_VERSION="2.0.1+cpu"
+TORCH_VISION_VERSION="0.15.2+cpu"
 
 inc_new_api=false
 # ======== set up config for pytorch models ========

.azure-pipelines/scripts/models/run_tensorflow_models_trigger.sh

Lines changed: 1 addition & 1 deletion
@@ -22,7 +22,7 @@ do
 done
 
 FRAMEWORK="tensorflow"
-FRAMEWORK_VERSION="2.12.0"
+FRAMEWORK_VERSION="2.13.0"
 
 inc_new_api=false
 # ======== set up config for tensorflow models ========

.azure-pipelines/scripts/ut/env_setup.sh

Lines changed: 15 additions & 5 deletions
@@ -24,7 +24,8 @@ echo "========= test case is ${test_case}"
 if [[ "${tensorflow_version}" == *"-official" ]]; then
     pip install tensorflow==${tensorflow_version%-official}
 elif [[ "${tensorflow_version}" == "spr-base" ]]; then
-    pip install /tf_dataset/tf_binary/221212/tensorflow*.whl
+    pip install /tf_dataset/tf_binary/230928/tensorflow*.whl
+    pip install cmake
     pip install protobuf==3.20.3
     pip install horovod==0.27.0
     if [[ $? -ne 0 ]]; then
@@ -48,10 +49,13 @@ if [[ "${torchvision_version}" != "" ]]; then
 fi
 
 if [[ "${ipex_version}" == "1.13.0+cpu" ]]; then
-    ipex_whl="https://github.com/intel/intel-extension-for-pytorch/releases/download/v1.13.0%2Bcpu/intel_extension_for_pytorch-1.13.0-cp38-cp38-manylinux2014_x86_64.whl"
+    ipex_whl="https://github.com/intel/intel-extension-for-pytorch/releases/download/v1.13.0%2Bcpu/intel_extension_for_pytorch-1.13.0-cp310-cp310-manylinux2014_x86_64.whl"
     pip install $ipex_whl
 elif [[ "${ipex_version}" == "2.0.0+cpu" ]]; then
-    ipex_whl="https://intel-extension-for-pytorch.s3.amazonaws.com/ipex_stable/cpu/intel_extension_for_pytorch-2.0.0%2Bcpu-cp38-cp38-linux_x86_64.whl"
+    ipex_whl="https://intel-extension-for-pytorch.s3.amazonaws.com/ipex_stable/cpu/intel_extension_for_pytorch-2.0.0%2Bcpu-cp310-cp310-linux_x86_64.whl"
+    pip install $ipex_whl
+elif [[ "${ipex_version}" == "2.0.1+cpu" ]]; then
+    ipex_whl="https://intel-extension-for-pytorch.s3.amazonaws.com/ipex_stable/cpu/intel_extension_for_pytorch-2.0.100%2Bcpu-cp310-cp310-linux_x86_64.whl"
     pip install $ipex_whl
 elif [[ "${ipex_version}" == "2.1.0" ]]; then
     pip install /tf_dataset/pt_binary/ww32/torch-*.whl
@@ -65,7 +69,11 @@ fi
 
 if [[ "${onnxruntime_version}" != "" ]]; then
     pip install onnxruntime==${onnxruntime_version}
-    pip install onnxruntime-extensions==0.8.0
+    if [[ "${onnxruntime_version}" == "1.14"* ]]; then
+        pip install onnxruntime-extensions==0.8.0
+    else
+        pip install onnxruntime-extensions
+    fi
     pip install optimum
 fi
 
@@ -79,8 +87,10 @@ fi
 
 # install special test env requirements
 # common deps
-pip install transformers
+pip install cmake
 pip install horovod
+pip install transformers
+
 if [[ $(echo "${test_case}" | grep -c "others") != 0 ]];then
     pip install tf_slim xgboost accelerate==0.21.0
 elif [[ $(echo "${test_case}" | grep -c "nas") != 0 ]]; then
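
The IPEX wheel names above switch from cp38 to cp310 tags, matching the Python 3.10 interpreter that ships with the Ubuntu 22.04 base images. A small sketch (not part of this commit) for confirming the local interpreter matches the expected tag before installing one of those wheels:

# Sketch only: print the interpreter's ABI tag; the wheels above expect cp310.
py_tag="cp$(python3 -c 'import sys; print("%d%d" % sys.version_info[:2])')"
echo "interpreter tag: ${py_tag}"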

.azure-pipelines/scripts/ut/ut_fwk_version.sh

Lines changed: 14 additions & 9 deletions
@@ -4,23 +4,28 @@ echo "export UT fwk version..."
 test_mode=$1
 
 if [ "$test_mode" == "coverage" ]; then
-    export tensorflow_version='2.12.0'
-    export pytorch_version='2.0.0+cpu'
-    export torchvision_version='0.15.1+cpu'
-    export ipex_version='2.0.0+cpu'
-    export onnx_version='1.13.1'
-    export onnxruntime_version='1.14.1'
+    export tensorflow_version='2.13.0'
+    export pytorch_version='2.0.1+cpu'
+    export torchvision_version='0.15.2+cpu'
+    export ipex_version='2.0.1+cpu'
+    export onnx_version='1.14.1'
+    export onnxruntime_version='1.15.1'
     export mxnet_version='1.9.1'
 else
-    export tensorflow_version='2.11.0'
+    export tensorflow_version='2.12.0'
     export pytorch_version='1.13.0+cpu'
     export torchvision_version='0.14.0+cpu'
     export ipex_version='1.13.0+cpu'
-    export onnx_version='1.13.0'
-    export onnxruntime_version='1.13.1'
+    export onnx_version='1.13.1'
+    export onnxruntime_version='1.14.1'
     export mxnet_version='1.9.1'
 fi
 
+# import torch before import tensorflow
+cd /neural-compressor/test || exit 1
+find . -name "test*.py" | xargs sed -i 's/import tensorflow as tf/import torch; import tensorflow as tf/g'
+find . -name "test*.py" | xargs sed -i 's/import tensorflow.compat.v1 as tf/import torch; import tensorflow.compat.v1 as tf/g'
+find . -name "test*.py" | xargs sed -i 's/from tensorflow import keras/import torch; from tensorflow import keras/g'
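
The added find/sed lines rewrite the test files so that torch is imported ahead of tensorflow, as the "# import torch before import tensorflow" comment states. A minimal sketch of the same substitution on a throwaway file (the /tmp path is hypothetical, not part of the commit):

# Sketch only: demonstrate the import-reordering substitution on one file.
printf 'import tensorflow as tf\n' > /tmp/test_example.py
sed -i 's/import tensorflow as tf/import torch; import tensorflow as tf/g' /tmp/test_example.py
cat /tmp/test_example.py   # prints: import torch; import tensorflow as tf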