webnn: Deprecate MLTensorUsage in favor of boolean flags
As per the feedback on this thread on the MLTensor explainer PR:
webmachinelearning/webnn#754 (comment)

This CL keeps logic to accept the deprecated MLTensorUsage flags for
now, though that compatibility path will only remain for about a
milestone, giving callers the opportunity to migrate their existing
code (see the migration sketch below).

Bug: 343638938
Change-Id: I56209e68fde3920b8d6c781c8f804ac6fcd35c9a
Cq-Include-Trybots: luci.chromium.try:mac14.arm64-blink-rel,win11-blink-rel
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/5933323
Reviewed-by: ningxin hu <ningxin.hu@intel.com>
Auto-Submit: Austin Sullivan <asully@chromium.org>
Commit-Queue: ningxin hu <ningxin.hu@intel.com>
Cr-Commit-Position: refs/heads/main@{#1370419}
a-sully authored and chromium-wpt-export-bot committed Oct 18, 2024
1 parent: 229ad07; commit: ab3cd3a
Showing 6 changed files with 64 additions and 40 deletions.
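For illustration, the caller-side migration this commit asks for looks roughly like the sketch below. This is a hedged example rather than code from the CL: it assumes a WebNN-enabled browser exposing navigator.ml, the boolean descriptor members (readable, writable) are taken from the updated tests in this commit, and the MLTensorUsage constants come from the removed lines.

// Minimal sketch of migrating an MLTensor descriptor from the deprecated
// MLTensorUsage bitflags to the new boolean members.
// Assumes a WebNN-enabled browser; names not shown in this commit
// (e.g. the function name) are illustrative only.
async function migrateTensorCreation() {
  const context = await navigator.ml.createContext();

  // Before (deprecated; still accepted for roughly one more milestone):
  const oldStyle = await context.createTensor({
    dataType: 'float32',
    shape: [2, 2],
    usage: MLTensorUsage.WRITE | MLTensorUsage.READ,
  });

  // After: each capability becomes an individual boolean flag.
  const newStyle = await context.createTensor({
    dataType: 'float32',
    shape: [2, 2],
    readable: true,  // permits context.readTensor(newStyle)
    writable: true,  // permits context.writeTensor(newStyle, data)
  });

  return {oldStyle, newStyle};
}

The updated getDescriptorFromTensor() helper in tensor.https.any.js below shows the full set of boolean members reflected on an MLTensor, including importableToWebGPU.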
7 changes: 4 additions & 3 deletions webnn/conformance_tests/byob_readtensor.https.any.js
@@ -32,7 +32,8 @@ promise_setup(async () => {
mlTensor = await mlContext.createTensor({
dataType: 'int32',
shape: [2, 4],
- usage: MLTensorUsage.WRITE | MLTensorUsage.READ,
+ readable: true,
+ writable: true,
});
} catch (e) {
throw new AssertionError(
@@ -141,7 +142,7 @@ promise_test(async (t) => {
const tensor = await mlContext.createTensor({
dataType: 'int32',
shape: [2, 2],
- usage: MLTensorUsage.READ,
+ readable: true,
});
const arrayBufferView = new Int32Array(2 * 2);
const arrayBuffer = arrayBufferView.buffer;
@@ -159,7 +160,7 @@ promise_test(async (t) => {
const tensor = await mlContext.createTensor({
dataType: 'int32',
shape: [2, 2],
- usage: MLTensorUsage.READ,
+ readable: true,
});
const arrayBufferView = new Int32Array(2 * 2);
const arrayBuffer = arrayBufferView.buffer;
12 changes: 6 additions & 6 deletions webnn/conformance_tests/inputs-are-not-modified.https.any.js
@@ -31,10 +31,10 @@ promise_test(async () => {
mlContext.createTensor({
dataType: 'float32',
shape: [4],
- usage: MLTensorUsage.WRITE | MLTensorUsage.READ
+ readable: true,
+ writable: true,
}),
- mlContext.createTensor(
- {dataType: 'float32', shape: [4], usage: MLTensorUsage.READ}),
+ mlContext.createTensor({dataType: 'float32', shape: [4], readable: true}),
builder.build({'output': outputOperand})
]);

@@ -66,10 +66,10 @@ promise_test(async () => {
mlContext.createTensor({
dataType: 'float32',
shape: [4],
- usage: MLTensorUsage.WRITE | MLTensorUsage.READ
+ readable: true,
+ writable: true,
}),
- mlContext.createTensor(
- {dataType: 'float32', shape: [4], usage: MLTensorUsage.READ}),
+ mlContext.createTensor({dataType: 'float32', shape: [4], readable: true}),
builder.build({'output': outputOperand})
]);

27 changes: 18 additions & 9 deletions webnn/conformance_tests/parallel-dispatch.https.any.js
@@ -33,7 +33,8 @@ promise_test(async () => {
const operandDescriptor = {
dataType: 'float32',
shape: [1],
- usage: MLTensorUsage.WRITE | MLTensorUsage.READ,
+ readable: true,
+ writable: true,
};

const [mlGraph, inputTensor1, inputTensor2, outputTensor] =
@@ -73,7 +74,8 @@ promise_test(async () => {
const operandDescriptor = {
dataType: 'float32',
shape: [1],
- usage: MLTensorUsage.WRITE | MLTensorUsage.READ,
+ readable: true,
+ writable: true,
};
const mlGraph = await buildMulGraph(mlContext, operandDescriptor, 3);

@@ -101,7 +103,8 @@ promise_test(async () => {
const operandDescriptor = {
dataType: 'float32',
shape: [1],
- usage: MLTensorUsage.WRITE | MLTensorUsage.READ,
+ readable: true,
+ writable: true,
};
const mlGraph = await buildMulGraph(mlContext, operandDescriptor, 10);

@@ -140,7 +143,8 @@ promise_test(async () => {
const operandDescriptor = {
dataType: 'float32',
shape: [1],
- usage: MLTensorUsage.WRITE | MLTensorUsage.READ,
+ readable: true,
+ writable: true,
};
const mlGraph = await buildMulGraph(mlContext, operandDescriptor, 9);

@@ -178,7 +182,8 @@ promise_test(async () => {
const operandDescriptor = {
dataType: 'float32',
shape: [1],
- usage: MLTensorUsage.WRITE | MLTensorUsage.READ,
+ readable: true,
+ writable: true,
};
const mlGraph = await buildMulGraph(mlContext, operandDescriptor, 2);

@@ -211,7 +216,8 @@ promise_test(async () => {
const operandDescriptor = {
dataType: 'float32',
shape: [1],
- usage: MLTensorUsage.WRITE | MLTensorUsage.READ,
+ readable: true,
+ writable: true,
};

// write/write...
@@ -250,7 +256,8 @@ promise_test(async () => {
const operandDescriptor = {
dataType: 'float32',
shape: [1],
- usage: MLTensorUsage.WRITE | MLTensorUsage.READ,
+ readable: true,
+ writable: true,
};

// write/write...
@@ -288,7 +295,8 @@ promise_test(async () => {
const operandDescriptor = {
dataType: 'float32',
shape: [1],
- usage: MLTensorUsage.WRITE | MLTensorUsage.READ,
+ readable: true,
+ writable: true,
};

const graphs = await Promise.all([3, 2].map(async (multiplier) => {
@@ -324,7 +332,8 @@ promise_test(async () => {
const operandDescriptor = {
dataType: 'float32',
shape: [1],
- usage: MLTensorUsage.WRITE | MLTensorUsage.READ,
+ readable: true,
+ writable: true,
};

const graphs = await Promise.all([2, 3].map(async (multiplier) => {
46 changes: 30 additions & 16 deletions webnn/conformance_tests/tensor.https.any.js
@@ -33,7 +33,13 @@ const sizeOfDescriptor = (descriptor) => {
};

const getDescriptorFromTensor = (tensor) => {
- return {dataType: tensor.dataType, shape: tensor.shape, usage: tensor.usage};
+ return {
+ dataType: tensor.dataType,
+ shape: tensor.shape,
+ readable: tensor.readable,
+ writable: tensor.writable,
+ importableToWebGPU: tensor.importableToWebGPU,
+ };
};


@@ -162,7 +168,7 @@ const testWriteTensor = (testName) => {
const tensorDescriptor = {
dataType: 'int32',
shape: [1],
- usage: MLTensorUsage.WRITE,
+ writable: true,
};
let mlTensor = await mlContext.createTensor(tensorDescriptor);

@@ -211,7 +217,7 @@ const testWriteTensor = (testName) => {
const tensorDescriptor = {
dataType: 'int32',
shape: [2, 2],
- usage: MLTensorUsage.WRITE,
+ writable: true,
};
let mlTensor = await mlContext.createTensor(tensorDescriptor);

@@ -228,7 +234,7 @@ const testWriteTensor = (testName) => {
const tensorDescriptor = {
dataType: 'int32',
shape: [2, 3],
- usage: MLTensorUsage.WRITE,
+ writable: true,
};
let mlTensor = await mlContext.createTensor(tensorDescriptor);

@@ -247,7 +253,8 @@ const testWriteTensor = (testName) => {
let mlTensor = await mlContext.createTensor({
dataType: 'int32',
shape: [1],
- usage: MLTensorUsage.WRITE | MLTensorUsage.READ,
+ readable: true,
+ writable: true,
});

// Initialize the tensor.
@@ -270,7 +277,8 @@ const testWriteTensor = (testName) => {
const tensorDescriptor = {
dataType: 'int32',
shape: [2, 2],
- usage: MLTensorUsage.WRITE | MLTensorUsage.READ,
+ readable: true,
+ writable: true,
};
let mlTensor = await mlContext.createTensor(tensorDescriptor);

@@ -321,7 +329,7 @@ const testReadTensor = (testName) => {
let mlTensor = await mlContext.createTensor({
dataType: 'int32',
shape: [2, 2],
- usage: MLTensorUsage.READ,
+ readable: true,
});

// Reading a destroyed MLTensor should reject.
@@ -335,7 +343,7 @@ const testReadTensor = (testName) => {
let mlTensor = await mlContext.createTensor({
dataType: 'int32',
shape: [2, 3],
- usage: MLTensorUsage.READ,
+ readable: true,
});

let promise = mlContext.readTensor(mlTensor);
@@ -351,7 +359,7 @@ const testReadTensor = (testName) => {
let mlTensor = await mlContext.createTensor({
dataType: 'int32',
shape: [1024],
- usage: MLTensorUsage.READ,
+ readable: true,
});

await assert_tensor_data_equals(mlContext, mlTensor, new Uint32Array(1024));
@@ -361,7 +369,8 @@ const testReadTensor = (testName) => {
let mlTensor = await mlContext.createTensor({
dataType: 'int32',
shape: [1],
- usage: MLTensorUsage.READ | MLTensorUsage.WRITE,
+ readable: true,
+ writable: true,
});

// Initialize the tensor.
@@ -377,7 +386,8 @@ const testReadTensor = (testName) => {
let mlTensor = await mlContext.createTensor({
dataType: 'int32',
shape: [1],
- usage: MLTensorUsage.WRITE | MLTensorUsage.READ,
+ readable: true,
+ writable: true,
});

// Initialize the tensor.
@@ -395,7 +405,8 @@ const testReadTensor = (testName) => {
let mlTensor = await mlContext.createTensor({
dataType: 'int32',
shape: [1],
- usage: MLTensorUsage.WRITE | MLTensorUsage.READ,
+ readable: true,
+ writable: true,
});

// Initialize the tensor.
@@ -413,7 +424,8 @@ const testReadTensor = (testName) => {
let mlTensor = await mlContext.createTensor({
dataType: 'int32',
shape: [1],
- usage: MLTensorUsage.WRITE | MLTensorUsage.READ,
+ readable: true,
+ writable: true,
});

// Initialize the tensor.
@@ -431,7 +443,8 @@ const testReadTensor = (testName) => {
let mlTensor = await mlContext.createTensor({
dataType: 'int32',
shape: [1],
- usage: MLTensorUsage.WRITE | MLTensorUsage.READ,
+ readable: true,
+ writable: true,
});

const inputData = [0xAA, 0xAA, 0xAA, 0xAA];
@@ -448,7 +461,7 @@ const testReadTensor = (testName) => {
const tensorDescriptor = {
dataType: 'int32',
shape: [2, 3],
- usage: MLTensorUsage.READ,
+ readable: true,
};
let mlTensor = await mlContext.createTensor(tensorDescriptor);

@@ -484,7 +497,8 @@ const testDispatchTensor = (testName) => {
const tensorDescriptor = {
dataType: 'float32',
shape: shape,
- usage: MLTensorUsage.WRITE | MLTensorUsage.READ,
+ readable: true,
+ writable: true,
};
const lhsOperand = builder.input('lhs', tensorDescriptor);
const rhsOperand = builder.input('rhs', tensorDescriptor);
6 changes: 3 additions & 3 deletions webnn/validation_tests/destroyContext.https.any.js
@@ -135,7 +135,7 @@ promise_test(async t => {
const tensor = await context.createTensor({
dataType: 'float32',
shape: [1],
- usage: MLTensorUsage.READ,
+ readable: true,
});
context.destroy();
promise_rejects_dom(t, 'InvalidStateError', context.readTensor(tensor));
@@ -146,7 +146,7 @@ promise_test(async t => {
const tensor = await context.createTensor({
dataType: 'float32',
shape: [1],
- usage: MLTensorUsage.READ,
+ readable: true,
});
let promise = context.readTensor(tensor);
context.destroy();
@@ -161,7 +161,7 @@ promise_test(async t => {
const tensor = await context.createTensor({
dataType: 'float32',
shape: [1],
- usage: MLTensorUsage.WRITE,
+ writable: true,
});
let arrayBuffer = new ArrayBuffer(4);
context.destroy();
6 changes: 3 additions & 3 deletions webnn/validation_tests/destroyGraph.https.any.js
@@ -110,17 +110,17 @@ promise_test(async t => {
const lhsTensor = await context.createTensor({
dataType: 'float32',
shape: [1],
- usage: MLTensorUsage.WRITE,
+ writable: true,
});
const rhsTensor = await context.createTensor({
dataType: 'float32',
shape: [1],
- usage: MLTensorUsage.WRITE,
+ writable: true,
});
const outputTensor = await context.createTensor({
dataType: 'float32',
shape: [1],
- usage: MLTensorUsage.READ,
+ readable: true,
});
// Initialize inputs
const inputData = new Float32Array(1).fill(2.0);
