Skip to content

Commit ee01566

Browse files
xhuan28 and RodgerZhu
authored
secure logistic regression inference based on HE and SGX (#151)
* inital commit Signed-off-by: Xiaojun Huang <xiaojun.huang@intel.com> * first commit of lr_sgx_he solution Signed-off-by: Huang, Xiaojun <xiaojun.huang@intel.com> * infer client and infer server communicate via grpc Signed-off-by: Huang, Xiaojun <xiaojun.huang@intel.com> * add dockerfile and build scripts Signed-off-by: Huang, Xiaojun <xiaojun.huang@intel.com> * update dockerfile and build scripts Signed-off-by: Huang, Xiaojun <xiaojun.huang@intel.com> * Create README.md * Update README.md * update license wording in each file Signed-off-by: Huang, Xiaojun <xiaojun.huang@intel.com> * update the license date of gflags.cmake Signed-off-by: Huang, Xiaojun <xiaojun.huang@intel.com> * add the doc of lr_infer_he_sgx Signed-off-by: Huang, Xiaojun <xiaojun.huang@intel.com> Signed-off-by: Xiaojun Huang <xiaojun.huang@intel.com> Signed-off-by: Huang, Xiaojun <xiaojun.huang@intel.com> Co-authored-by: Zhu Yunge <yunge.zhu@intel.com>
1 parent 52da531 commit ee01566

27 files changed

+3802
-0
lines changed

cczoo/lr_infer_he_sgx/CMakeLists.txt

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
# Copyright (c) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

cmake_minimum_required(VERSION 3.15)

project(lr_infer_he_sgx LANGUAGES CXX)

# BUG FIX: option()'s signature is option(<variable> "<help text>" [value]).
# The original `option(ENABLE_INTEL_HEXL ON)` declared an option whose help
# string was "ON" and whose default value was OFF -- the opposite of intent.
option(ENABLE_INTEL_HEXL "Enable Intel HEXL acceleration for SEAL" ON)

# Project-wide C++ standard: strict ISO C++17 (no GNU extensions).
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_EXTENSIONS OFF)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

# Third-party dependencies (SEAL for HE, gRPC for transport, gflags for CLI).
include(cmake/seal.cmake)
include(cmake/grpc.cmake)
include(cmake/gflags.cmake)

# NOTE(review): directory-scoped include_directories() is kept because the
# consuming targets are declared in src/ and are not visible here; migrating
# to target_include_directories() would have to happen in src/CMakeLists.txt.
include_directories("${CMAKE_CURRENT_BINARY_DIR}")
include_directories("${SEAL_INC_DIR}")

add_subdirectory(src)

cczoo/lr_infer_he_sgx/Dockerfile

Lines changed: 117 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,117 @@
1+
#
# Copyright (c) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# https://github.com/oscarlab/graphene/blob/master/Tools/gsc/images/graphene_aks.latest.dockerfile

FROM ubuntu:20.04

ENV DEBIAN_FRONTEND=noninteractive
ENV INSTALL_PREFIX=/usr/local
ENV LD_LIBRARY_PATH=${INSTALL_PREFIX}/lib:${INSTALL_PREFIX}/lib/x86_64-linux-gnu:${LD_LIBRARY_PATH}
# BUG FIX: the original appended ${LD_LIBRARY_PATH} into PATH; library
# directories do not belong on the executable search path.
ENV PATH=${INSTALL_PREFIX}/bin:${PATH}
ENV LC_ALL=C.UTF-8 LANG=C.UTF-8

# Base build toolchain and utilities.
RUN apt-get update \
    && apt-get install -y --no-install-recommends apt-utils \
    && apt-get install -y \
        ca-certificates \
        build-essential \
        autoconf \
        libtool \
        python3-pip \
        python3-dev \
        git \
        zlib1g-dev \
        wget \
        unzip \
        vim \
        jq

# Intel SGX apt repository.
RUN echo "deb [trusted=yes arch=amd64] https://download.01.org/intel-sgx/sgx_repo/ubuntu focal main" | tee /etc/apt/sources.list.d/intel-sgx.list
RUN wget -qO - https://download.01.org/intel-sgx/sgx_repo/ubuntu/intel-sgx-deb.key | apt-key add - \
    && apt-get update

# Install SGX-PSW
RUN apt-get install -y libsgx-pce-logic libsgx-ae-qve libsgx-quote-ex libsgx-quote-ex-dev libsgx-qe3-logic sgx-aesm-service

# Install SGX-DCAP
RUN apt-get install -y libsgx-dcap-ql-dev libsgx-dcap-default-qpl libsgx-dcap-quote-verify-dev libsgx-dcap-default-qpl-dev

# Gramine (LibOS that runs the unmodified inference server inside an enclave).
ENV GRAMINEDIR=/gramine
ENV SGX_DCAP_VERSION=DCAP_1.11
ENV GRAMINE_VERSION=v1.2
ENV ISGX_DRIVER_PATH=${GRAMINEDIR}/driver
ENV WERROR=1
ENV SGX=1

RUN apt-get install -y bison gawk nasm python3-click python3-jinja2 ninja-build pkg-config \
    libcurl4-openssl-dev libprotobuf-c-dev python3-protobuf protobuf-c-compiler \
    libgmp-dev libmpfr-dev libmpc-dev libisl-dev

RUN pip3 install --upgrade pip \
    && pip3 install toml meson cryptography

RUN git clone https://github.com/gramineproject/gramine.git ${GRAMINEDIR} \
    && cd ${GRAMINEDIR} \
    && git checkout ${GRAMINE_VERSION}

RUN git clone https://github.com/intel/SGXDataCenterAttestationPrimitives.git ${ISGX_DRIVER_PATH} \
    && cd ${ISGX_DRIVER_PATH} \
    && git checkout ${SGX_DCAP_VERSION}

# LD_LIBRARY_PATH is cleared for the build steps so the host toolchain is used.
RUN cd ${GRAMINEDIR} \
    && LD_LIBRARY_PATH="" meson setup build/ --buildtype=debug -Dprefix=${INSTALL_PREFIX} -Ddirect=enabled -Dsgx=enabled -Ddcap=enabled -Dsgx_driver=dcap1.10 -Dsgx_driver_include_path=${ISGX_DRIVER_PATH}/driver/linux/include \
    && LD_LIBRARY_PATH="" ninja -C build/ \
    && LD_LIBRARY_PATH="" ninja -C build/ install

# Disable apport crash reporting and service auto-start inside the container.
RUN echo "enabled=0" > /etc/default/apport
RUN echo "exit 0" > /usr/sbin/policy-rc.d

# Clean tmp files
RUN apt-get clean all \
    && rm -rf /var/lib/apt/lists/* \
    && rm -rf ~/.cache/* \
    && rm -rf /tmp/*

# Generate the enclave signing key used by gramine-sgx-sign.
RUN gramine-sgx-gen-private-key

# Recent CMake (the Ubuntu 20.04 archive version is too old for this project).
RUN mkdir -p ${INSTALL_PREFIX} \
    && wget -q -O cmake-linux.sh https://github.com/Kitware/CMake/releases/download/v3.19.6/cmake-3.19.6-Linux-x86_64.sh \
    && sh cmake-linux.sh -- --skip-license --prefix=${INSTALL_PREFIX} \
    && rm cmake-linux.sh

ENV WORKSPACE=/lr_infer_he_sgx
WORKDIR ${WORKSPACE}

COPY src ./src
COPY datasets ./datasets
COPY cmake ./cmake
COPY CMakeLists.txt \
    start_service.sh \
    infer_server.manifest.template \
    Makefile ./

# Build the inference server, then produce the Gramine manifest/signature.
RUN cmake -S. -Bbuild \
    && cmake --build build \
    && cp build/src/infer_server . \
    && cp datasets/lrtest_mid_lrmodel.csv . \
    && make clean \
    && ENTRYPOINT=infer_server make

RUN echo "/lr_infer_he_sgx/start_service.sh" >> ~/.bashrc

# Clear any build-time proxies so they do not leak into the runtime image.
ENV http_proxy=
ENV https_proxy=

cczoo/lr_infer_he_sgx/Makefile

Lines changed: 61 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,61 @@
1+
#
# Copyright (c) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

################################# CONSTANTS ###################################

# Directory containing this Makefile (kept for compatibility with callers).
THIS_DIR := $(dir $(lastword $(MAKEFILE_LIST)))
# Multiarch library directory of the host toolchain, e.g. /lib/x86_64-linux-gnu.
ARCH_LIBDIR ?= /lib/$(shell $(CC) -dumpmachine)
# Enclave entrypoint binary; overridden on the command line (ENTRYPOINT=infer_server make).
ENTRYPOINT ?= ""

# DEBUG=1 turns on verbose Gramine logging; the default is errors only.
ifeq ($(DEBUG),1)
GRAMINE_LOG_LEVEL = debug
else
GRAMINE_LOG_LEVEL = error
endif

.PHONY: all
all: infer_server.manifest
ifeq ($(SGX),1)
all: infer_server.manifest.sgx infer_server.sig infer_server.token
endif

################################ MANIFEST ###############################

# Render the manifest template, substituting log level, entrypoint and libdir.
infer_server.manifest: infer_server.manifest.template
	gramine-manifest \
		-Dlog_level=$(GRAMINE_LOG_LEVEL) \
		-Dentrypoint=$(ENTRYPOINT) \
		-Darch_libdir=$(ARCH_LIBDIR) \
		$< >$@

# Sign the manifest for SGX; produces both the .sgx manifest and the .sig.
infer_server.manifest.sgx: infer_server.manifest
	gramine-sgx-sign \
		--manifest $< \
		--output $@

infer_server.sig: infer_server.manifest.sgx

# Obtain the launch token from the signature.
infer_server.token: infer_server.sig
	gramine-sgx-get-token --output $@ --sig $<

################################## CLEANUP ####################################

.PHONY: clean
clean:
	$(RM) *.token *.sig *.manifest.sgx *.manifest
	$(RM) -r scripts/__pycache__

.PHONY: distclean
distclean: clean

cczoo/lr_infer_he_sgx/README.md

Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,49 @@
1+
# Secure Logistic Regression Inference with HE and Intel SGX
2+
## Introduction
3+
Nowadays, a wide variety of applications are available as SaaS applications. More and more AI workloads are deployed to the cloud.
4+
AI model providers benefit from the powerful data computing capability of the cloud and reduce the difficulty of maintaining a complex AI inference service.
5+
However, most AI models are treated as the intellectual property of the AI model provider.
6+
How to protect AI models from being disclosed to other parties, including the CSP, is a problem that we need to address.
7+
On the other hand, users want to obtain precise inference result by utilizing the AI model which is trained from massive data.
8+
However, they don’t want to reveal their private data for the inference. Admittedly, they could run AI inference locally by downloading the AI model to their end devices.
9+
But this is not feasible when the AI workload is very heavy, because the computing capability of most end devices, such as smart phones, is limited.
10+
Moreover, model providers don’t intend to share their AI models with end users directly either.
11+
12+
## Solution Description
13+
To address the above two problems, we developed a solution of secure AI inference based on SGX and HE. In this solution, we assume the AI workload executes on an enclave of cloud server with SGX enabled.
14+
To convince the model provider that their AI workloads are not tampered with and run in an enclave, the cloud server generates a quote and sends it to the model provider for remote attestation.
15+
After passing the quote verification, the model provider deploys the AI workload to the cloud and launch the AI inference service.
16+
The user generates a HE key pair locally and encrypts privacy data for inference with the public key. The encrypted data is transferred to the cloud server and do inference there.
17+
After the inference is completed, the encrypted result is sent back to the user through gRPC. The user decrypts it with the private key and obtains the plaintext of the final result.
18+
19+
![image](https://user-images.githubusercontent.com/27326867/197438740-9923ca2e-0911-40a0-bf5e-d0bcebc78609.png)
20+
21+
## Build and Run
22+
### Prerequisite
23+
- A server with Intel SGX enabled
24+
- Docker
25+
### Build Docker Image
26+
```
27+
git clone https://github.com/intel/confidential-computing-zoo
28+
cd confidential-computing-zoo/cczoo/lr_infer_he_sgx
29+
./build_docker_image.sh
30+
```
31+
### Execution
32+
Open 2 terminals, one for the inference client that has data to be inferred and the other for the inference server that has an AI model.
33+
- Inference server
34+
```
35+
./start_container.sh server
36+
```
37+
- Inference client
38+
```
39+
./start_container.sh client
40+
```
41+
### Result
42+
>EncryptionParameters: wrote 91 bytes
43+
PublicKey: wrote 709085 bytes
44+
RelinKeys: wrote 3545129 bytes
45+
HE inference result - accuracy: 0.944
46+
47+
## Reference
48+
Intel HE Toolkit: https://github.com/intel/he-toolkit
49+
Intel SGX: https://www.intel.com/content/www/us/en/developer/tools/software-guard-extensions/overview.html
Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,31 @@
1+
#!/bin/bash
#
# Copyright (c) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# BUG FIX: the shebang was originally on line 16, below the license header;
# an interpreter directive is only honored as the very first line of a script.

# Abort on the first failing command.
set -e

# You can remove no_proxy and proxy_server if your network doesn't need it
no_proxy="localhost,127.0.0.1"
proxy_server="" # your http proxy server

# Run from the directory containing this script (quoted to survive spaces).
cd "$(dirname "$0")"

DOCKER_BUILDKIT=0 docker build \
    --build-arg no_proxy=${no_proxy} \
    --build-arg http_proxy=${proxy_server} \
    --build-arg https_proxy=${proxy_server} \
    -f Dockerfile \
    -t lr_infer_he_sgx:latest \
    .
Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

include(ExternalProject)

# If the user has not specified an install path, override the default
# /usr/local to be the build directory of the original target.
# BUG FIX: the original `if (NOT ${CMAKE_INSTALL_PREFIX})` expanded the
# prefix to a path string and then evaluated that string as a variable
# name, which is always false -- the branch could never fire. CMake sets
# CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT exactly for this check.
if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
  set(CMAKE_INSTALL_PREFIX ${CMAKE_CURRENT_BINARY_DIR})
endif()

set(GFLAGS_GIT_REPO_URL https://github.com/gflags/gflags.git)
set(GFLAGS_GIT_LABEL v2.2.2)

# Fetch and build gflags v2.2.2 at build time; it is neither installed
# nor part of the default `all` target.
ExternalProject_Add(
    ext_gflags
    PREFIX ext_gflags
    GIT_REPOSITORY ${GFLAGS_GIT_REPO_URL}
    GIT_TAG ${GFLAGS_GIT_LABEL}
    INSTALL_COMMAND ""
    UPDATE_COMMAND ""
    EXCLUDE_FROM_ALL TRUE)

# ------------------------------------------------------------------------------

ExternalProject_Get_Property(ext_gflags SOURCE_DIR BINARY_DIR)

# Interface target carrying the gflags usage requirements; depending
# targets link `libgflags` and inherit the include dir and static archive.
add_library(libgflags INTERFACE)
add_dependencies(libgflags ext_gflags)
message(STATUS "libgflags include: ${BINARY_DIR}/include/")
message(STATUS "libgflags library: ${BINARY_DIR}/lib/")

target_include_directories(libgflags SYSTEM
    INTERFACE ${BINARY_DIR}/include)
target_link_libraries(libgflags
    INTERFACE ${BINARY_DIR}/lib/libgflags.a)
Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,29 @@
1+
# Copyright (c) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Pull gRPC (pinned to v1.38.1) into this build via FetchContent, which
# performs an add_subdirectory() internally.
message(STATUS "Using gRPC via add_subdirectory (FetchContent).")
include(FetchContent)
FetchContent_Declare(
    grpc
    GIT_REPOSITORY https://github.com/grpc/grpc.git
    GIT_TAG v1.38.1)
FetchContent_MakeAvailable(grpc)

# Because the gRPC tree is part of this build, its targets can be used
# directly; these variables are the conventional handles consumers expect.
set(PROTOBUF_LIBPROTOBUF libprotobuf)
set(PROTOBUF_PROTOC $<TARGET_FILE:protoc>)
set(GRPC_GRPCPP grpc++)
set(REFLECTION grpc++_reflection)
set(GRPC_CPP_PLUGIN_EXECUTABLE $<TARGET_FILE:grpc_cpp_plugin>)

0 commit comments

Comments
 (0)