Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: load a model entirely from an onnx file and build circuit at runtime #25

Merged
merged 35 commits into from
Oct 4, 2022
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
35 commits
Select commit Hold shift + click to select a range
1832411
add layout printing to example
jasonmorton Sep 23, 2022
97608c0
constraints not satisfied
jasonmorton Sep 25, 2022
d83a4f2
Purely dynamic load from onnx file
jasonmorton Sep 25, 2022
8fac9b5
rm cmt
jasonmorton Sep 25, 2022
18c87fa
load example
jasonmorton Sep 25, 2022
e77e957
Cleanup warnings
jasonmorton Sep 26, 2022
ee45844
cleanup
jasonmorton Sep 26, 2022
3f8826f
track last known state on onnx configure
alexander-camuto Sep 26, 2022
10a5544
examples that break
alexander-camuto Sep 26, 2022
2eb553b
fix examples
alexander-camuto Sep 26, 2022
8180d99
fix after rebase
alexander-camuto Sep 30, 2022
55abff3
change BITS to dynamic in OnnxModel
jasonmorton Oct 1, 2022
2cbf878
to quant
jasonmorton Oct 2, 2022
e0e5466
to quant
jasonmorton Oct 2, 2022
88a6e2e
2D padding and stride
alexander-camuto Oct 2, 2022
b868862
ops formatting
alexander-camuto Oct 2, 2022
15795f0
named tuple inputs
alexander-camuto Oct 2, 2022
fdc7c4a
conv with bias and consistent affine interface
alexander-camuto Oct 2, 2022
44807ce
cnvrl automatic type casting in layout
alexander-camuto Oct 2, 2022
2402752
basic auto quantization
jasonmorton Oct 3, 2022
5a03223
cleanup, correct scale-based rescaling
jasonmorton Oct 3, 2022
1927b2b
cleanup
jasonmorton Oct 3, 2022
53e870f
parameter extractor helper function
alexander-camuto Oct 3, 2022
9b17f65
arbitrary length input extractor
alexander-camuto Oct 3, 2022
d9d874f
conv layout function
alexander-camuto Oct 3, 2022
47ea1e6
correct opkind for pytorch Conv2D
alexander-camuto Oct 3, 2022
b1cde21
Create 1lcnvrl.onnx
alexander-camuto Oct 3, 2022
d2111b4
start of conv configuration
alexander-camuto Oct 3, 2022
85d1f98
simplified affine
alexander-camuto Oct 3, 2022
16ad2b5
shape, quantize, configure, layout from convolution onnx
jasonmorton Oct 4, 2022
40444b1
correct output for conv example
jasonmorton Oct 4, 2022
95ee663
ezkl cli
alexander-camuto Oct 4, 2022
bcad075
Update Cargo.toml
alexander-camuto Oct 4, 2022
535f741
cleanup readme
alexander-camuto Oct 4, 2022
2fdcd2a
rm smallonnx
alexander-camuto Oct 4, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Purely dynamic load from onnx file
  • Loading branch information
jasonmorton authored and alexander-camuto committed Sep 30, 2022
commit d83a4f27986c37f117aeb31e794007b8155f01ea
2 changes: 1 addition & 1 deletion examples/loadedonnx.rs
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ mod loadonnx_example {
let k = 15; //2^k rows
// let input = Tensor::<i32>::new(Some(&[-30, -21, 11]), &[3]).unwrap();
let input = Tensor::<i32>::new(Some(&[1, 2, 3]), &[3]).unwrap();
let public_input: Vec<i32> = vec![0, 0, 0, 0];
let public_input: Vec<i32> = vec![148, 0, 139, 0];
println!("public input {:?}", public_input);

let circuit = OnnxCircuit::<F, 14> {
Expand Down
25 changes: 13 additions & 12 deletions src/onnx/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -247,7 +247,7 @@ impl OnnxModel {
// let node = OnnxNode::new(self.model.nodes[node_idx].clone());
let node = &self.onnx_nodes[node_idx];

println!("Configure Node {}, a {:?}", node_idx, node.opkind());
// println!("Configure Node {}, a {:?}", node_idx, node.opkind());

// Figure out, find, and load the params
match node.opkind() {
Expand Down Expand Up @@ -279,13 +279,13 @@ impl OnnxModel {
// let mut bias: Tensor<Column<Fixed>> =
// (0..out_dim).map(|_| meta.fixed_column()).into();
// bias.reshape?
let weight_fixeds = advices.get_slice(&[0..out_dim], &[out_dim, in_dim]);
let weight_fixeds = advices.get_slice(&[0..out_dim], &[out_dim, in_dim]); //&[0..out_dim], &[out_dim, in_dim]
let bias_fixeds = advices.get_slice(&[out_dim + 1..out_dim + 2], &[out_dim]);
let params = [weight_fixeds, bias_fixeds];
// let input = advices.get_slice(&[in_dim..in_dim + 1], &[in_dim]);
// let output = advices.get_slice(&[out_dim + 1..out_dim + 2], &[out_dim]);
let input = advices.get_slice(&[0..1], &[in_dim]);
let output = advices.get_slice(&[1..2], &[out_dim]);
let input = advices.get_slice(&[out_dim + 2..out_dim + 3], &[in_dim]);
let output = advices.get_slice(&[out_dim + 3..out_dim + 4], &[out_dim]);
let conf = Affine1dConfig::configure(meta, &params, input, output);
Ok(OnnxNodeConfig::Affine(conf))
}
Expand Down Expand Up @@ -352,7 +352,7 @@ impl OnnxModel {
match vt.clone() {
ValTensor::PrevAssigned { inner: v, dims: _ } => {
let r: Tensor<i32> = v.clone().into();
println!("LAYER OUT {:?}", r);
println!("Node {} out: {:?}", node_idx, r);
}
_ => panic!("Should be assigned"),
};
Expand Down Expand Up @@ -392,9 +392,9 @@ impl OnnxModel {
// let node = OnnxNode::new(self.model.nodes[node_idx].clone());
let input_outlets = &node.node.inputs;

println!("Layout Node {}, {:?}", node_idx, node.opkind());
// println!("Layout Node {}, {:?}", node_idx, node.opkind());
// let nice: Tensor<i32> = input.into();
println!("Node {} input tensor {:?}", node_idx, &input.clone());
// println!("Node {} input tensor {:?}", node_idx, &input.clone());

// The node kind and the config should be the same.
match (node.opkind(), config) {
Expand All @@ -412,15 +412,16 @@ impl OnnxModel {
(input_outlets[1].node, input_outlets[1].slot);
let (bias_node_ix, bias_node_slot) = (input_outlets[2].node, input_outlets[2].slot);
let weight_node = OnnxNode::new(self.nodes()[weight_node_ix].clone());
let weight_value = weight_node.output_tensor_by_slot(weight_node_slot, 0f32, 2f32);
println!("Weight: {:?}", weight_value);
let weight_value =
weight_node.output_tensor_by_slot(weight_node_slot, 0f32, 256f32);
// println!("Weight: {:?}", weight_value);
// let in_dim = weight_value.dims()[1];
// let out_dim = weight_value.dims()[0];
let weight_vt =
ValTensor::from(<Tensor<i32> as Into<Tensor<Value<F>>>>::into(weight_value));
// let weight_vt = ValTensor::from(weight_value);
let bias_node = OnnxNode::new(self.nodes()[bias_node_ix].clone());
let bias_value = bias_node.output_tensor_by_slot(bias_node_slot, 0f32, 0f32);
let bias_value = bias_node.output_tensor_by_slot(bias_node_slot, 0f32, 256f32);
let bias_vt =
ValTensor::from(<Tensor<i32> as Into<Tensor<Value<F>>>>::into(bias_value));
// println!(
Expand All @@ -430,7 +431,7 @@ impl OnnxModel {
// bias_vt.dims()
// );
let out = ac.layout(layouter, input, &[weight_vt, bias_vt]);
println!("Node {} out {:?}", node_idx, out);
// println!("Node {} out {:?}", node_idx, out);
Some(out)
}
(OpKind::Convolution, OnnxNodeConfig::Conv(cc)) => {
Expand Down Expand Up @@ -505,7 +506,7 @@ impl OnnxModel {
}
}
}
Ok(max)
Ok(max + 5)
}

pub fn get_node_output_shape_by_name_and_rank(
Expand Down
5 changes: 4 additions & 1 deletion src/tensor/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -286,7 +286,10 @@ impl<T: Clone + TensorType> Tensor<T> {
/// assert_eq!(a.get_index(&[1, 0, 1]), 10);
/// ```
pub fn get_index(&self, indices: &[usize]) -> usize {
assert!(self.dims.len() == indices.len());
// if self.dims.len() != indices.len() {
// println!("{:?} vs {:?}", self.dims, indices);
// }
assert_eq!(self.dims.len(), indices.len());
let mut index = 0;
let mut d = 1;
for i in (0..indices.len()).rev() {
Expand Down