Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: load a model entirely from an onnx file and build circuit at runtime #25

Merged
merged 35 commits into from
Oct 4, 2022
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
35 commits
Select commit Hold shift + click to select a range
1832411
add layout printing to example
jasonmorton Sep 23, 2022
97608c0
constraints not satisfied
jasonmorton Sep 25, 2022
d83a4f2
Purely dynamic load from onnx file
jasonmorton Sep 25, 2022
8fac9b5
rm cmt
jasonmorton Sep 25, 2022
18c87fa
load example
jasonmorton Sep 25, 2022
e77e957
Cleanup warnings
jasonmorton Sep 26, 2022
ee45844
cleanup
jasonmorton Sep 26, 2022
3f8826f
track last known state on onnx configure
alexander-camuto Sep 26, 2022
10a5544
examples that break
alexander-camuto Sep 26, 2022
2eb553b
fix examples
alexander-camuto Sep 26, 2022
8180d99
fix after rebase
alexander-camuto Sep 30, 2022
55abff3
change BITS to dynamic in OnnxModel
jasonmorton Oct 1, 2022
2cbf878
to quant
jasonmorton Oct 2, 2022
e0e5466
to quant
jasonmorton Oct 2, 2022
88a6e2e
2D padding and stride
alexander-camuto Oct 2, 2022
b868862
ops formatting
alexander-camuto Oct 2, 2022
15795f0
named tuple inputs
alexander-camuto Oct 2, 2022
fdc7c4a
conv with bias and consistent affine interface
alexander-camuto Oct 2, 2022
44807ce
cnvrl automatic type casting in layout
alexander-camuto Oct 2, 2022
2402752
basic auto quantization
jasonmorton Oct 3, 2022
5a03223
cleanup, correct scale-based rescaling
jasonmorton Oct 3, 2022
1927b2b
cleanup
jasonmorton Oct 3, 2022
53e870f
parameter extractor helper function
alexander-camuto Oct 3, 2022
9b17f65
arbitrary length input extractor
alexander-camuto Oct 3, 2022
d9d874f
conv layout function
alexander-camuto Oct 3, 2022
47ea1e6
correct opkind for pytorch Conv2D
alexander-camuto Oct 3, 2022
b1cde21
Create 1lcnvrl.onnx
alexander-camuto Oct 3, 2022
d2111b4
start of conv configuration
alexander-camuto Oct 3, 2022
85d1f98
simplified affine
alexander-camuto Oct 3, 2022
16ad2b5
shape, quantize, configure, layout from convolution onnx
jasonmorton Oct 4, 2022
40444b1
correct output for conv example
jasonmorton Oct 4, 2022
95ee663
ezkl cli
alexander-camuto Oct 4, 2022
bcad075
Update Cargo.toml
alexander-camuto Oct 4, 2022
535f741
cleanup readme
alexander-camuto Oct 4, 2022
2fdcd2a
rm smallonnx
alexander-camuto Oct 4, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
conv with bias and consistent affine interface
  • Loading branch information
alexander-camuto committed Oct 2, 2022
commit fdc7c4a5ea212c90c8a34052aa15b41aa43849bf
18 changes: 15 additions & 3 deletions benches/cnvrl.rs
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,8 @@ where
Value<F>: TensorType,
{
image: ValTensor<F>,
kernels: ValTensor<F>,
kernel: ValTensor<F>,
bias: ValTensor<F>,
}

impl<F: FieldExt + TensorType> Circuit<F> for MyCircuit<F>
Expand Down Expand Up @@ -57,10 +58,13 @@ where
);
kernel.reshape(&[OUT_CHANNELS, IN_CHANNELS, KERNEL_HEIGHT, KERNEL_WIDTH]);

let bias = Tensor::from((0..OUT_CHANNELS).map(|_| meta.fixed_column()));

Self::Config::configure(
meta,
&[
VarTensor::from(kernel),
VarTensor::from(bias),
advices.get_slice(
&[0..IMAGE_HEIGHT * IN_CHANNELS],
&[IN_CHANNELS, IMAGE_HEIGHT, IMAGE_WIDTH],
Expand All @@ -80,7 +84,10 @@ where
config: Self::Config,
mut layouter: impl Layouter<F>,
) -> Result<(), Error> {
let _output = config.layout(&mut layouter, &[self.kernels.clone(), self.image.clone()]);
let _output = config.layout(
&mut layouter,
&[self.kernel.clone(), self.bias.clone(), self.image.clone()],
);
Ok(())
}
}
Expand Down Expand Up @@ -110,9 +117,14 @@ fn runcnvrl(c: &mut Criterion) {
);
kernels.reshape(&[OUT_CHANNELS, IN_CHANNELS, KERNEL_HEIGHT, KERNEL_WIDTH]);

let bias = Tensor::from(
(0..{ OUT_CHANNELS }).map(|_| Value::known(pallas::Base::random(OsRng))),
);

let circuit = MyCircuit::<pallas::Base> {
image: ValTensor::from(image),
kernels: ValTensor::from(kernels),
kernel: ValTensor::from(kernels),
bias: ValTensor::from(bias),
};

group.throughput(Throughput::Elements((IMAGE_HEIGHT * IMAGE_WIDTH) as u64));
Expand Down
53 changes: 26 additions & 27 deletions src/nn/affine.rs
Original file line number Diff line number Diff line change
@@ -1,9 +1,10 @@
use super::*;
use crate::tensor::ops::*;
use crate::tensor::{Tensor, TensorType};
use halo2_proofs::{
arithmetic::FieldExt,
circuit::{Layouter, Value},
plonk::{Assigned, ConstraintSystem, Constraints, Expression, Selector},
circuit::Layouter,
plonk::{ConstraintSystem, Constraints, Expression, Selector},
};
use std::marker::PhantomData;

Expand Down Expand Up @@ -40,8 +41,6 @@ impl<F: FieldExt + TensorType> LayerConfig<F> for Affine1dConfig<F> {
assert_eq!(kernel.dims()[0], output.dims()[0]);
assert_eq!(kernel.dims()[0], bias.dims()[0]);

let in_dim = input.dims()[0];

let config = Self {
selector: meta.selector(),
kernel,
Expand All @@ -53,17 +52,16 @@ impl<F: FieldExt + TensorType> LayerConfig<F> for Affine1dConfig<F> {

meta.create_gate("affine", |meta| {
let selector = meta.query_selector(config.selector);

// Now we compute the linear expression, and add it to constraints
let input = config.input.query(meta, 0);
let kernel = config.kernel.query(meta, 0);
let bias = config.bias.query(meta, 0);

let witnessed_output = matmul(kernel, bias, input);

// Get output expressions for each input channel
let expected_output: Tensor<Expression<F>> = config.output.query(meta, 0);
// Now we compute the linear expression, and add it to constraints
let witnessed_output = expected_output.enum_map(|i, _| {
let mut c = Expression::Constant(<F as TensorType>::zero().unwrap());
for j in 0..in_dim {
c = c + config.kernel.query_idx(meta, i, j) * config.input.query_idx(meta, 0, j)
}
c + config.bias.query_idx(meta, 0, i)
// add the bias
});

let constraints = witnessed_output.enum_map(|i, o| o - expected_output[i].clone());

Expand All @@ -85,20 +83,21 @@ impl<F: FieldExt + TensorType> LayerConfig<F> for Affine1dConfig<F> {
|mut region| {
let offset = 0;
self.selector.enable(&mut region, offset)?;
let input = self.input.assign(&mut region, offset, input.clone());
let weights = self.kernel.assign(&mut region, offset, kernel.clone());
let bias = self.bias.assign(&mut region, offset, bias.clone());
// calculate value of output
let mut output: Tensor<Value<Assigned<F>>> =
Tensor::new(None, &[kernel.dims()[0]]).unwrap();

output = output.enum_map(|i, mut o| {
input.enum_map(|j, x| {
o = o + x.value_field() * weights.get(&[i, j]).value_field();
});

o + bias.get(&[i]).value_field()
});
let inp = self
.input
.assign(&mut region, offset, input.clone())
.map(|e| e.value_field());
let k = self
.kernel
.assign(&mut region, offset, kernel.clone())
.map(|e| e.value_field());
let b = self
.bias
.assign(&mut region, offset, bias.clone())
.map(|e| e.value_field());

let mut output = matmul(k, b, inp);
output.flatten();

Ok(self
.output
Expand Down
25 changes: 17 additions & 8 deletions src/nn/cnvrl.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ where
{
selector: Selector,
kernel: VarTensor,
bias: VarTensor,
input: VarTensor,
pub output: VarTensor,
padding: (usize, usize),
Expand All @@ -35,15 +36,17 @@ where
variables: &[VarTensor],
conv_params: Option<&[usize]>,
) -> Self {
assert_eq!(variables.len(), 3);
let (kernel, input, output) = (
assert_eq!(variables.len(), 4);
let (kernel, bias, input, output) = (
variables[0].clone(),
variables[1].clone(),
variables[2].clone(),
variables[3].clone(),
);
assert_eq!(input.dims().len(), 3);
assert_eq!(output.dims().len(), 3);
assert_eq!(kernel.dims().len(), 4);
assert_eq!(bias.dims().len(), 1);

// should fail if None
let conv_params = conv_params.unwrap();
Expand All @@ -58,6 +61,7 @@ where
let config = Self {
selector: meta.selector(),
kernel,
bias,
input,
output,
padding: (conv_params[0], conv_params[1]),
Expand All @@ -71,8 +75,9 @@ where
// Get output expressions for each input channel
let image = config.input.query(meta, 0);
let kernel = config.kernel.query(meta, 0);
let mut bias = config.bias.query(meta, 0);

let expected_output = convolution(kernel, image, config.padding, config.stride);
let expected_output = convolution(kernel, bias, image, config.padding, config.stride);

let witnessed_output = config.output.query(meta, image_width);

Expand All @@ -87,9 +92,9 @@ where
/// Assigns values to the convolution gate variables created when calling `configure`.
/// Values are supplied as a 3-element array of `[kernel, bias, input]` ValTensors.
fn layout(&self, layouter: &mut impl Layouter<F>, values: &[ValTensor<F>]) -> ValTensor<F> {
assert_eq!(values.len(), 2);
assert_eq!(values.len(), 3);

let (kernel, input) = (values[0].clone(), values[1].clone());
let (kernel, bias, input) = (values[0].clone(), values[1].clone(), values[2].clone());
let image_width = input.dims()[2];

let t = layouter
Expand All @@ -99,16 +104,20 @@ where
self.selector.enable(&mut region, 0)?;

self.kernel.assign(&mut region, 0, kernel.clone());
self.bias.assign(&mut region, 0, bias.clone());
self.input.assign(&mut region, 0, input.clone());

let output = match input.clone() {
ValTensor::Value {
inner: img,
dims: _,
} => match kernel.clone() {
ValTensor::Value { inner: k, dims: _ } => {
convolution::<_>(k, img, self.padding, self.stride)
}
ValTensor::Value { inner: k, dims: _ } => match bias.clone() {
ValTensor::Value { inner: b, dims: _ } => {
convolution::<_>(k, b, img, self.padding, self.stride)
}
_ => todo!(),
},
_ => todo!(),
},
_ => todo!(),
Expand Down
4 changes: 3 additions & 1 deletion src/tensor/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -336,7 +336,9 @@ impl<T: Clone + TensorType> Tensor<T> {
/// assert_eq!(c, Tensor::from([1, 16].into_iter()))
/// ```
/// Applies a function `f` to every element, producing a new tensor of the
/// mapped values.
///
/// Unlike a plain `Tensor::from(iter)`, the result is reshaped to the
/// original dimensions, so `map` preserves the tensor's shape — important
/// for multi-dimensional tensors whose `From<Iterator>` construction would
/// otherwise flatten them to 1-D.
///
/// # Examples
/// ```
/// use halo2deeplearning::tensor::Tensor;
/// let a = Tensor::<i32>::new(Some(&[1, 4]), &[2]).unwrap();
/// let c = a.map(|x| i32::pow(x, 2));
/// assert_eq!(c, Tensor::from([1, 16].into_iter()))
/// ```
pub fn map<F: FnMut(T) -> G, G: TensorType>(&self, mut f: F) -> Tensor<G> {
    // Build the mapped tensor (flattened by construction) ...
    let mut t = Tensor::from(self.inner.iter().map(|e| f(e.clone())));
    // ... then restore the source tensor's dimensions so shape is preserved.
    t.reshape(self.dims());
    t
}

/// Maps a function to tensors and enumerates
Expand Down
47 changes: 34 additions & 13 deletions src/tensor/ops.rs
Original file line number Diff line number Diff line change
@@ -1,47 +1,62 @@
use crate::tensor::{Tensor, TensorType};
pub use std::ops::{Add, Mul};

/// Matrix multiplies two 2D tensors.
/// Matrix multiplies two 2D tensors (and adds an offset).
/// ```
/// use halo2deeplearning::tensor::Tensor;
/// use halo2deeplearning::tensor::ops::matmul;
///
/// let x = Tensor::<i32>::new(
/// Some(&[5, 2, 3, 0, 4, -1, 3, 1, 6]),
/// &[3, 3],
/// Some(&[5, 2, 3, 0, 4, -1, 3, 1, 6, 2, 1, 1]),
/// &[3, 4],
/// ).unwrap();
/// let k = Tensor::<i32>::new(
/// Some(&[2, 1, 2, 1, 1, 1]),
/// &[2, 3],
/// ).unwrap();
/// let result = matmul(k, x);
/// let expected = Tensor::<i32>::new(Some(&[18, 2, 19, 10, 3, 10]), &[2, 3]).unwrap();
/// let b = Tensor::<i32>::new(
/// Some(&[0, 0]),
/// &[2],
/// ).unwrap();
/// let result = matmul(k, b, x);
/// let expected = Tensor::<i32>::new(Some(&[26, 7, 11, 3, 15, 3, 7, 2]), &[2, 4]).unwrap();
/// assert_eq!(result, expected);
/// ```
pub fn matmul<T: TensorType + Mul<Output = T> + Add<Output = T>>(
kernel: Tensor<T>,
input: Tensor<T>,
bias: Tensor<T>,
mut input: Tensor<T>,
) -> Tensor<T> {
assert_eq!(bias.dims()[0], kernel.dims()[0]);
assert_eq!(input.dims()[0], kernel.dims()[1]);

// does matrix to vector multiplication
match input.dims().len() {
1 => input.reshape(&[input.dims()[0], 1]),
_ => {}
}

let input_dims = input.dims();
let kernel_dims = kernel.dims();

assert!(input_dims[0] == kernel_dims[1]);

// calculate value of output
let mut output: Tensor<T> = Tensor::new(None, &[kernel_dims[0], input_dims[1]]).unwrap();

for i in 0..kernel_dims[0] {
for j in 0..input_dims[1] {
output.set(
&[i, j],
dot_product(kernel.get_slice(&[i..i + 1]), input.get_slice(&[j..j + 1])),
dot_product(
kernel.get_slice(&[i..i + 1]),
input.get_slice(&[0..input_dims[0], j..j + 1]),
) + bias[i].clone(),
);
}
}
output
}

/// Applies convolution over a 3D tensor of shape C x H x W.
/// Applies convolution over a 3D tensor of shape C x H x W (and adds a bias).
/// ```
/// use halo2deeplearning::tensor::Tensor;
/// use halo2deeplearning::tensor::ops::convolution;
Expand All @@ -54,24 +69,30 @@ pub fn matmul<T: TensorType + Mul<Output = T> + Add<Output = T>>(
/// Some(&[5, 1, 1, 1]),
/// &[1, 1, 2, 2],
/// ).unwrap();
/// let result = convolution::<i32>(k, x, &[0, 0, 1, 1]);
/// let b = Tensor::<i32>::new(
/// Some(&[0]),
/// &[1],
/// ).unwrap();
/// let result = convolution::<i32>(k, b, x, (0, 0), (1, 1));
/// let expected = Tensor::<i32>::new(Some(&[31, 16, 8, 26]), &[1, 2, 2]).unwrap();
/// assert_eq!(result, expected);
/// ```
pub fn convolution<T: TensorType + Mul<Output = T> + Add<Output = T>>(
kernel: Tensor<T>,
bias: Tensor<T>,
image: Tensor<T>,
padding: (usize, usize),
stride: (usize, usize),
) -> Tensor<T> {
assert_eq!(image.dims().len(), 3);
assert_eq!(kernel.dims().len(), 4);
assert_eq!(bias.dims().len(), 1);
assert_eq!(image.dims()[0], kernel.dims()[1]);
assert_eq!(bias.dims()[0], kernel.dims()[0]);

let image_dims = image.dims();
let kernel_dims = kernel.dims();


let (output_channels, input_channels, kernel_height, kernel_width) = (
kernel_dims[0],
kernel_dims[1],
Expand Down Expand Up @@ -104,7 +125,7 @@ pub fn convolution<T: TensorType + Mul<Output = T> + Add<Output = T>>(
rs..(rs + kernel_height),
cs..(cs + kernel_width),
]),
),
) + bias[i].clone(),
);
}
}
Expand Down