0

WebNN: Continue moving generic end2end tests to MLGraphTestBase

CL 4236177 [1] extracted the element-wise binary operator tests into
MLGraphTestBase; this CL continues that work for the relu, clamp, conv2d,
gemm, hardswish, pool2d and reshape operators.

[1] https://chromium-review.googlesource.com/c/chromium/src/+/4236177

Bug: 1273291
Change-Id: I5f05601bcc38bfe8378c598d3c4e1f54209205eb
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/4277630
Reviewed-by: Jiewei Qian <qjw@chromium.org>
Commit-Queue: Junwei Fu <junwei.fu@intel.com>
Cr-Commit-Position: refs/heads/main@{#1112023}
This commit is contained in:
junwei
2023-03-02 03:02:53 +00:00
committed by Chromium LUCI CQ
parent d9fa5c5e14
commit 72fa141d4d
7 changed files with 1155 additions and 947 deletions

@ -73,7 +73,15 @@ source_set("unit_tests") {
]
if (build_webnn_with_xnnpack) {
sources += [ "webnn/ml_graph_xnnpack_test.cc" ]
sources += [
"webnn/ml_graph_test.cc",
"webnn/ml_graph_xnnpack_test.cc",
]
deps += [ "//third_party/xnnpack" ]
}
if (is_chromeos && current_cpu == "x64") {
sources += [ "webnn/ml_graph_test_tflite.cc" ]
deps += [ "//third_party/tflite" ]
}
}

@ -6,7 +6,6 @@
#include <algorithm>
#include <memory>
#include <numeric>
#include "base/numerics/checked_math.h"
#include "third_party/blink/renderer/bindings/core/v8/native_value_traits_impl.h"
@ -2770,10 +2769,12 @@ TEST_P(FakeMLGraphTest, ComputeTest) {
}
}
INSTANTIATE_TEST_SUITE_P(All,
FakeMLGraphTest,
::testing::Values(ExecutionMode::kAsync,
ExecutionMode::kSync),
ExecutionModeParamToString);
INSTANTIATE_TEST_SUITE_P(
All,
FakeMLGraphTest,
testing::Combine(::testing::Values(BackendType::kFake),
::testing::Values(ExecutionMode::kAsync,
ExecutionMode::kSync)),
TestVarietyToString);
} // namespace blink

@ -0,0 +1,928 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <cstring>
#include <functional>
#include <numeric>

#include "third_party/blink/renderer/bindings/core/v8/v8_binding_for_testing.h"
#include "third_party/blink/renderer/bindings/modules/v8/v8_ml_clamp_options.h"
#include "third_party/blink/renderer/bindings/modules/v8/v8_ml_conv_2d_options.h"
#include "third_party/blink/renderer/bindings/modules/v8/v8_ml_pool_2d_options.h"
#include "third_party/blink/renderer/modules/ml/webnn/ml_graph_builder.h"
#include "third_party/blink/renderer/modules/ml/webnn/ml_graph_test_base.h"
namespace blink {
// End-to-end graph tests parameterized on backend type and execution mode;
// see INSTANTIATE_TEST_SUITE_P at the bottom of this file.
class MLGraphTest : public MLGraphTestBase {};
// Builds a graph with a single element-wise binary operator of |kind|,
// computes it with |lhs| and |rhs| as graph inputs, and verifies the output
// values against |expected|.
template <typename T>
struct ElementWiseBinaryTester {
  // Test fixture used for BuildGraph/ComputeGraph; must be non-null.
  MLGraphTest* helper;
  ElementWiseBinaryKind kind;
  OperandInfo<T> lhs;
  OperandInfo<T> rhs;
  Vector<T> expected;

  void Test(V8TestingScope& scope) {
    // Build the graph.
    auto* builder = CreateMLGraphBuilder(scope.GetExecutionContext());
    auto* lhs_operand = BuildInput(builder, "lhs", lhs.dimensions, lhs.type,
                                   scope.GetExceptionState());
    auto* rhs_operand = BuildInput(builder, "rhs", rhs.dimensions, rhs.type,
                                   scope.GetExceptionState());
    auto* output_operand =
        BuildElementWiseBinary(scope, builder, kind, lhs_operand, rhs_operand);
    auto [graph, build_exception] =
        helper->BuildGraph(scope, builder, {{"output", output_operand}});
    EXPECT_NE(graph, nullptr);
    // Compute the graph.
    MLNamedArrayBufferViews inputs(
        {{"lhs", CreateArrayBufferViewForOperand(lhs_operand, lhs.values)},
         {"rhs", CreateArrayBufferViewForOperand(rhs_operand, rhs.values)}});
    MLNamedArrayBufferViews outputs(
        {{"output", CreateArrayBufferViewForOperand(output_operand)}});
    auto* compute_exception =
        helper->ComputeGraph(scope, graph, inputs, outputs);
    EXPECT_EQ(compute_exception, nullptr);
    // Compare the computed output with the expected values.
    auto results = GetArrayBufferViewValues<T>(outputs[0].second);
    EXPECT_EQ(results, expected);
  }
};
// Exercises add/sub/mul/div/min/max for various ranks and broadcasting.
TEST_P(MLGraphTest, ElementWiseBinaryTest) {
  V8TestingScope scope;
  {
    // Test element-wise add operator for two 1-D tensors.
    // The expected results should be the sum of the values of the two input
    // tensors, element-wise.
    ElementWiseBinaryTester<float>{
        .helper = this,
        .kind = ElementWiseBinaryKind::kAdd,
        .lhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {2},
                .values = {1.0, 2.0}},
        .rhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {2},
                .values = {3.0, 4.0}},
        .expected = {4.0, 6.0}}
        .Test(scope);
  }
  {
    // Test element-wise add operator for two 2-D tensors.
    ElementWiseBinaryTester<float>{
        .helper = this,
        .kind = ElementWiseBinaryKind::kAdd,
        .lhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {2, 2},
                .values = {1.0, 2.0, 3.0, 4.0}},
        .rhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {2, 2},
                .values = {5.0, 6.0, 7.0, 8.0}},
        .expected = {6.0, 8.0, 10.0, 12.0}}
        .Test(scope);
  }
  {
    // Test element-wise add operator for 1-D tensor broadcasting to 2-D
    // tensor.
    ElementWiseBinaryTester<float>{
        .helper = this,
        .kind = ElementWiseBinaryKind::kAdd,
        .lhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {2, 2},
                .values = {1.0, 2.0, 3.0, 4.0}},
        .rhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {2},
                .values = {5.0, 6.0}},
        .expected = {6.0, 8.0, 8.0, 10.0}}
        .Test(scope);
  }
  {
    // Test element-wise add operator for 3-D tensor broadcasting to 3-D
    // tensor.
    ElementWiseBinaryTester<float>{
        .helper = this,
        .kind = ElementWiseBinaryKind::kAdd,
        .lhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2},
                .values = {1.0, 2.0, 3.0, 4.0}},
        .rhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {2, 1, 2},
                .values = {5.0, 6.0, 7.0, 8.0}},
        .expected = {6.0, 8.0, 8.0, 10.0, 8.0, 10.0, 10.0, 12.0}}
        .Test(scope);
  }
  {
    // Test element-wise add operator for two 4-D tensors.
    // Fix: |helper| was not initialized here, leaving it null and crashing
    // when Test() calls helper->BuildGraph().
    ElementWiseBinaryTester<float>{
        .helper = this,
        .kind = ElementWiseBinaryKind::kAdd,
        .lhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2, 1},
                .values = {1.0, 2.0, 3.0, 4.0}},
        .rhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2, 1},
                .values = {5.0, 6.0, 7.0, 8.0}},
        .expected = {6.0, 8.0, 10.0, 12.0}}
        .Test(scope);
  }
  {
    // Test element-wise sub operator for two 4-D tensors.
    // The expected results should be the difference of the values of the two
    // input tensors, element-wise.
    ElementWiseBinaryTester<float>{
        .helper = this,
        .kind = ElementWiseBinaryKind::kSub,
        .lhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2, 1},
                .values = {1.0, 2.0, 3.0, 4.0}},
        .rhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2, 1},
                .values = {5.0, 6.0, 7.0, 8.0}},
        .expected = {-4.0, -4.0, -4.0, -4.0}}
        .Test(scope);
  }
  {
    // Test element-wise mul operator for two 4-D tensors.
    // The expected results should be the product of the values of the two
    // input tensors, element-wise.
    ElementWiseBinaryTester<float>{
        .helper = this,
        .kind = ElementWiseBinaryKind::kMul,
        .lhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2, 1},
                .values = {1.0, 2.0, 3.0, 4.0}},
        .rhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2, 1},
                .values = {5.0, 6.0, 7.0, 8.0}},
        .expected = {5.0, 12.0, 21.0, 32.0}}
        .Test(scope);
  }
  {
    // Test element-wise div operator for two 4-D tensors.
    // The expected results should be the quotient of the values of the two
    // input tensors, element-wise.
    ElementWiseBinaryTester<float>{
        .helper = this,
        .kind = ElementWiseBinaryKind::kDiv,
        .lhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2, 1},
                .values = {3.0, 4.0, 6.0, 8.0}},
        .rhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2, 1},
                .values = {1.0, 2.0, 2.0, 2.0}},
        .expected = {3.0, 2.0, 3.0, 4.0}}
        .Test(scope);
  }
  {
    // Test element-wise min operator for two 4-D tensors.
    // The expected results should be the lesser values of the two input
    // tensors, element-wise.
    ElementWiseBinaryTester<float>{
        .helper = this,
        .kind = ElementWiseBinaryKind::kMin,
        .lhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2, 1},
                .values = {1.0, 4.0, 5.0, 8.0}},
        .rhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2, 1},
                .values = {2.0, 3.0, 6.0, 7.0}},
        .expected = {1.0, 3.0, 5.0, 7.0}}
        .Test(scope);
  }
  {
    // Test element-wise max operator for two 4-D tensors.
    // The expected results should be the greater values of the two input
    // tensors, element-wise.
    ElementWiseBinaryTester<float>{
        .helper = this,
        .kind = ElementWiseBinaryKind::kMax,
        .lhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2, 1},
                .values = {1.0, 4.0, 5.0, 8.0}},
        .rhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2, 1},
                .values = {2.0, 3.0, 6.0, 7.0}},
        .expected = {2.0, 4.0, 6.0, 8.0}}
        .Test(scope);
  }
}
// Helper that builds a graph containing a single relu operator, computes it,
// and verifies the output values against |expected|.
template <typename T>
struct ReluTester {
  MLGraphTestBase* helper;
  OperandInfo<T> input;
  Vector<T> expected;

  void Test(V8TestingScope& scope) {
    // Build a graph with one relu node fed by a single graph input.
    auto* graph_builder = CreateMLGraphBuilder(scope.GetExecutionContext());
    auto* x = BuildInput(graph_builder, "input", input.dimensions, input.type,
                         scope.GetExceptionState());
    auto* y = graph_builder->relu(x, scope.GetExceptionState());
    auto [graph, build_exception] =
        helper->BuildGraph(scope, graph_builder, {{"output", y}});
    EXPECT_NE(graph, nullptr);

    // Execute the graph and compare the actual output with |expected|.
    MLNamedArrayBufferViews inputs(
        {{"input", CreateArrayBufferViewForOperand(x, input.values)}});
    MLNamedArrayBufferViews outputs(
        {{"output", CreateArrayBufferViewForOperand(y)}});
    auto* compute_exception =
        helper->ComputeGraph(scope, graph, inputs, outputs);
    EXPECT_EQ(compute_exception, nullptr);
    EXPECT_EQ(GetArrayBufferViewValues<T>(outputs[0].second), expected);
  }
};
// Verifies relu, y = max(0, x), on tensors of rank 1 through 4.
TEST_P(MLGraphTest, ReluTest) {
  V8TestingScope scope;
  {
    // Test relu operator for 1-D tensor.
    // The expected results should be the result of the rectified linear
    // function, y = max(0, x), applied to the input tensor, element-wise.
    ReluTester<float>{.helper = this,
                      .input = {.type = V8MLOperandType::Enum::kFloat32,
                                .dimensions = {2},
                                .values = {-1.0, 1.0}},
                      .expected = {0.0, 1.0}}
        .Test(scope);
  }
  {
    // Test relu operator for 2-D tensor.
    ReluTester<float>{.helper = this,
                      .input = {.type = V8MLOperandType::Enum::kFloat32,
                                .dimensions = {2, 2},
                                .values = {-10.0, -0.5, 0.5, 10.0}},
                      .expected = {0.0, 0.0, 0.5, 10.0}}
        .Test(scope);
  }
  {
    // Test relu operator for 3-D tensor.
    ReluTester<float>{.helper = this,
                      .input = {.type = V8MLOperandType::Enum::kFloat32,
                                .dimensions = {1, 2, 2},
                                .values = {-10.0, -0.5, 0.5, 10.0}},
                      .expected = {0.0, 0.0, 0.5, 10.0}}
        .Test(scope);
  }
  {
    // Test relu operator for 4-D tensor.
    ReluTester<float>{.helper = this,
                      .input = {.type = V8MLOperandType::Enum::kFloat32,
                                .dimensions = {1, 2, 2, 1},
                                .values = {-10.0, -0.5, 0.5, 10.0}},
                      .expected = {0.0, 0.0, 0.5, 10.0}}
        .Test(scope);
  }
}
// Builds a graph with a single resample2d operator configured by |options|,
// computes it, and verifies the output values against |expected|.
template <typename T>
struct Resample2dTester {
  // Test fixture used for BuildGraph/ComputeGraph; must be non-null.
  MLGraphTestBase* helper;
  OperandInfo<T> input;
  Vector<T> expected;

  void Test(V8TestingScope& scope,
            MLResample2dOptions* options = MLResample2dOptions::Create()) {
    // Build the graph.
    auto* builder = CreateMLGraphBuilder(scope.GetExecutionContext());
    auto* input_operand = BuildInput(builder, "input", input.dimensions,
                                     input.type, scope.GetExceptionState());
    auto* output_operand =
        BuildResample2d(scope, builder, input_operand, options);
    auto [graph, build_exception] =
        helper->BuildGraph(scope, builder, {{"output", output_operand}});
    EXPECT_NE(graph, nullptr);
    // Compute the graph.
    MLNamedArrayBufferViews inputs(
        {{"input",
          CreateArrayBufferViewForOperand(input_operand, input.values)}});
    MLNamedArrayBufferViews outputs(
        {{"output", CreateArrayBufferViewForOperand(output_operand)}});
    auto* compute_exception =
        helper->ComputeGraph(scope, graph, inputs, outputs);
    EXPECT_EQ(compute_exception, nullptr);
    // Compare the computed output with the expected values.
    auto results = GetArrayBufferViewValues<T>(outputs[0].second);
    EXPECT_EQ(results, expected);
  }
};
// Verifies linear-interpolation resample2d via explicit sizes and via scales.
// Both cases upsample a 2x2 spatial input to 4x4, so the expected outputs
// are identical.
TEST_P(MLGraphTest, Resample2dTest) {
  V8TestingScope scope;
  {
    // Test resample2d operator with axes = {1, 2}, sizes = {4, 4}.
    auto* options = MLResample2dOptions::Create();
    options->setSizes({4, 4});
    options->setAxes({1, 2});
    options->setMode(V8MLInterpolationMode::Enum::kLinear);
    Resample2dTester<float>{
        .helper = this,
        .input = {.type = V8MLOperandType::Enum::kFloat32,
                  .dimensions = {1, 2, 2, 1},
                  .values = {1, 2, 3, 4}},
        .expected = {1., 1.25, 1.75, 2., 1.5, 1.75, 2.25, 2.5, 2.5, 2.75, 3.25,
                     3.5, 3., 3.25, 3.75, 4.}}
        .Test(scope, options);
  }
  {
    // Test resample2d operator with axes = {1, 2}, scales = {2.0, 2.0}.
    auto* options = MLResample2dOptions::Create();
    options->setScales({2.0, 2.0});
    options->setAxes({1, 2});
    options->setMode(V8MLInterpolationMode::Enum::kLinear);
    Resample2dTester<float>{
        .helper = this,
        .input = {.type = V8MLOperandType::Enum::kFloat32,
                  .dimensions = {1, 2, 2, 1},
                  .values = {1, 2, 3, 4}},
        .expected = {1., 1.25, 1.75, 2., 1.5, 1.75, 2.25, 2.5, 2.5, 2.75, 3.25,
                     3.5, 3., 3.25, 3.75, 4.}}
        .Test(scope, options);
  }
}
// Helper that builds a graph containing a single clamp operator configured by
// |options|, computes it, and verifies the output against |expected|.
template <typename T>
struct ClampTester {
  MLGraphTestBase* helper;
  OperandInfo<T> input;
  Vector<T> expected;

  void Test(V8TestingScope& scope,
            MLClampOptions* options = MLClampOptions::Create()) {
    // Build a graph with one clamp node fed by a single graph input.
    auto* graph_builder = CreateMLGraphBuilder(scope.GetExecutionContext());
    auto* x = BuildInput(graph_builder, "input", input.dimensions, input.type,
                         scope.GetExceptionState());
    auto* y = graph_builder->clamp(x, options, scope.GetExceptionState());
    auto [graph, build_exception] =
        helper->BuildGraph(scope, graph_builder, {{"output", y}});
    EXPECT_NE(graph, nullptr);

    // Execute the graph and compare the actual output with |expected|.
    MLNamedArrayBufferViews inputs(
        {{"input", CreateArrayBufferViewForOperand(x, input.values)}});
    MLNamedArrayBufferViews outputs(
        {{"output", CreateArrayBufferViewForOperand(y)}});
    auto* compute_exception =
        helper->ComputeGraph(scope, graph, inputs, outputs);
    EXPECT_EQ(compute_exception, nullptr);
    EXPECT_EQ(GetArrayBufferViewValues<T>(outputs[0].second), expected);
  }
};
// Verifies clamp with no bounds, only a minimum, only a maximum, and both.
TEST_P(MLGraphTest, ClampTest) {
  V8TestingScope scope;
  {
    // Test clamp operator with default options that no minimum and maximum
    // values are defined.
    ClampTester<float>{.helper = this,
                       .input = {.type = V8MLOperandType::Enum::kFloat32,
                                 .dimensions = {1, 2, 2, 1},
                                 .values = {-10.0, -0.5, 0.5, 10.0}},
                       .expected = {-10.0, -0.5, 0.5, 10.0}}
        .Test(scope);
  }
  {
    // Test clamp operator with the minimum value defined.
    MLClampOptions* options = MLClampOptions::Create();
    options->setMinValue(0.0);
    ClampTester<float>{.helper = this,
                       .input = {.type = V8MLOperandType::Enum::kFloat32,
                                 .dimensions = {1, 2, 2, 1},
                                 .values = {-10.0, -0.5, 0.5, 10.0}},
                       .expected = {0.0, 0.0, 0.5, 10.0}}
        .Test(scope, options);
  }
  {
    // Test clamp operator with the maximum value defined.
    MLClampOptions* options = MLClampOptions::Create();
    options->setMaxValue(6.0);
    ClampTester<float>{.helper = this,
                       .input = {.type = V8MLOperandType::Enum::kFloat32,
                                 .dimensions = {1, 2, 2, 1},
                                 .values = {-10.0, -0.5, 0.5, 10.0}},
                       .expected = {-10.0, -0.5, 0.5, 6.0}}
        .Test(scope, options);
  }
  {
    // Test clamp operator with both the minimum and maximum values defined.
    MLClampOptions* options = MLClampOptions::Create();
    options->setMinValue(0.0);
    options->setMaxValue(6.0);
    ClampTester<float>{.helper = this,
                       .input = {.type = V8MLOperandType::Enum::kFloat32,
                                 .dimensions = {1, 2, 2, 1},
                                 .values = {-10.0, -0.5, 0.5, 10.0}},
                       .expected = {0.0, 0.0, 0.5, 6.0}}
        .Test(scope, options);
  }
}
// Builds a constant operand of the given |dimensions| and |type| whose
// backing buffer is filled with |values|. |values| must contain exactly one
// element per tensor element.
template <typename T>
MLOperand* BuildConstant(MLGraphBuilder* builder,
                         const Vector<uint32_t>& dimensions,
                         V8MLOperandType::Enum type,
                         const Vector<T>& values,
                         ExceptionState& exception_state) {
  // Fold with std::multiplies<size_t> so the accumulator stays size_t
  // throughout; std::multiplies<uint32_t> would truncate the running product
  // to 32 bits, defeating the size_t(1) seed for large shapes.
  size_t buffer_size = std::accumulate(dimensions.begin(), dimensions.end(),
                                       size_t(1), std::multiplies<size_t>());
  auto buffer = CreateDOMArrayBufferView(buffer_size, type);
  DCHECK_EQ(buffer->byteLength(), values.size() * sizeof(T));
  memcpy(buffer->BaseAddress(), values.data(), buffer->byteLength());
  // Delegate to the overload that takes a pre-filled buffer view.
  return BuildConstant(builder, dimensions, type, exception_state, buffer);
}
// Builds a graph with a single conv2d operator configured by |options|,
// computes it, and verifies the output against |expected|. The |filter| and
// optional |bias| are constant operands; |input| is a graph input.
template <typename T>
struct Conv2dTester {
  // Test fixture used for BuildGraph/ComputeGraph; must be non-null.
  MLGraphTestBase* helper;
  OperandInfo<T> input;
  OperandInfo<T> filter;
  absl::optional<OperandInfo<T>> bias = absl::nullopt;
  Vector<T> expected;

  void Test(V8TestingScope& scope,
            MLGraphBuilder* builder,
            MLConv2dOptions* options = MLConv2dOptions::Create()) {
    // Build the graph.
    auto* input_operand = BuildInput(builder, "input", input.dimensions,
                                     input.type, scope.GetExceptionState());
    auto* filter_operand =
        BuildConstant(builder, filter.dimensions, filter.type, filter.values,
                      scope.GetExceptionState());
    // The bias is fused into the conv2d through the options when provided.
    if (bias) {
      options->setBias(BuildConstant(builder, bias.value().dimensions,
                                     bias.value().type, bias.value().values,
                                     scope.GetExceptionState()));
    }
    auto* output_operand =
        BuildConv2d(scope, builder, input_operand, filter_operand, options);
    auto [graph, build_exception] =
        helper->BuildGraph(scope, builder, {{"output", output_operand}});
    EXPECT_NE(graph, nullptr);
    // Compute the graph.
    MLNamedArrayBufferViews inputs(
        {{"input",
          CreateArrayBufferViewForOperand(input_operand, input.values)}});
    MLNamedArrayBufferViews outputs(
        {{"output", CreateArrayBufferViewForOperand(output_operand)}});
    auto* compute_exception =
        helper->ComputeGraph(scope, graph, inputs, outputs);
    EXPECT_EQ(compute_exception, nullptr);
    // Compare the computed output with the expected values.
    auto results = GetArrayBufferViewValues<T>(outputs[0].second);
    EXPECT_EQ(results, expected);
  }
};
// Verifies conv2d, depthwise conv2d and conv2d fused with bias and
// relu/clamp activations.
// Fix: every Conv2dTester instance below omitted `.helper = this`, leaving
// the aggregate's |helper| member value-initialized to nullptr and crashing
// when Test() calls helper->BuildGraph().
TEST_P(MLGraphTest, Conv2dTest) {
  V8TestingScope scope;
  auto* builder = CreateMLGraphBuilder(scope.GetExecutionContext());
  {
    // Test conv2d operator for nhwc input layout and ohwi filter layout.
    auto* options = MLConv2dOptions::Create();
    options->setInputLayout(V8MLInputOperandLayout::Enum::kNhwc);
    options->setFilterLayout(V8MLConv2dFilterOperandLayout::Enum::kOhwi);
    Conv2dTester<float>{
        .helper = this,
        .input = {.type = V8MLOperandType::Enum::kFloat32,
                  .dimensions = {1, 2, 3, 3},
                  .values = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
                             11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0}},
        .filter = {.type = V8MLOperandType::Enum::kFloat32,
                   .dimensions = {3, 1, 1, 3},
                   .values = {1.0, 4.0, 7.0, 2.0, 5.0, 8.0, 3.0, 6.0, 9.0}},
        .expected = {30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0,
                     138.0, 171.0, 204.0, 174.0, 216.0, 258.0, 210.0, 261.0,
                     312.0}}
        .Test(scope, builder, options);
  }
  {
    // Test fused conv2d operator for nhwc input layout and ohwi filter layout,
    // fusing with bias operand and relu activation.
    auto* options = MLConv2dOptions::Create();
    options->setInputLayout(V8MLInputOperandLayout::Enum::kNhwc);
    options->setFilterLayout(V8MLConv2dFilterOperandLayout::Enum::kOhwi);
    options->setActivation(builder->relu(scope.GetExceptionState()));
    Conv2dTester<float>{
        .helper = this,
        .input = {.type = V8MLOperandType::Enum::kFloat32,
                  .dimensions = {1, 2, 3, 3},
                  .values = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
                             11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0}},
        .filter = {.type = V8MLOperandType::Enum::kFloat32,
                   .dimensions = {3, 1, 1, 3},
                   .values = {1.0, 4.0, 7.0, 2.0, 5.0, 8.0, 3.0, 6.0, 9.0}},
        .bias = OperandInfo<float>{.type = V8MLOperandType::Enum::kFloat32,
                                   .dimensions = {3},
                                   .values = {-6000.0, -7000.0, 8000.0}},
        .expected = {0.0, 0.0, 8042.0, 0.0, 0.0, 8096.0, 0.0, 0.0, 8150.0, 0.0,
                     0.0, 8204.0, 0.0, 0.0, 8258.0, 0.0, 0.0, 8312.0}}
        .Test(scope, builder, options);
  }
  {
    // Test depthwise conv2d operator by setting groups to input channels,
    // nhwc input layout, ihwo filter layout.
    auto* options = MLConv2dOptions::Create();
    options->setInputLayout(V8MLInputOperandLayout::Enum::kNhwc);
    options->setFilterLayout(V8MLConv2dFilterOperandLayout::Enum::kIhwo);
    options->setGroups(4);
    Conv2dTester<float>{
        .helper = this,
        .input = {.type = V8MLOperandType::Enum::kFloat32,
                  .dimensions = {1, 2, 2, 4},
                  .values = {10.0, 21.0, 10.0, 0.0, 10.0, 22.0, 20.0, 0.0, 10.0,
                             23.0, 30.0, 0.0, 10.0, 24.0, 40.0, 0.0}},
        .filter = {.type = V8MLOperandType::Enum::kFloat32,
                   .dimensions = {1, 2, 2, 4},
                   .values = {0.25, 0.0, 10.0, 50.0, 0.25, 1.0, 20.0, 50.0,
                              0.25, 0.0, 30.0, 50.0, 0.25, 1.0, 40.0, 50.0}},
        .expected = {10.0, 46.0, 3000.0, 0.0}}
        .Test(scope, builder, options);
  }
  {
    // Test fused depthwise conv2d operator by setting groups to input channels,
    // nhwc input layout, ihwo filter layout, fusing with bias operand and relu
    // activation.
    auto* options = MLConv2dOptions::Create();
    options->setInputLayout(V8MLInputOperandLayout::Enum::kNhwc);
    options->setFilterLayout(V8MLConv2dFilterOperandLayout::Enum::kIhwo);
    options->setGroups(4);
    options->setActivation(builder->relu(scope.GetExceptionState()));
    Conv2dTester<float>{
        .helper = this,
        .input = {.type = V8MLOperandType::Enum::kFloat32,
                  .dimensions = {1, 2, 2, 4},
                  .values = {10.0, 21.0, 10.0, 0.0, 10.0, 22.0, 20.0, 0.0, 10.0,
                             23.0, 30.0, 0.0, 10.0, 24.0, 40.0, 0.0}},
        .filter = {.type = V8MLOperandType::Enum::kFloat32,
                   .dimensions = {1, 2, 2, 4},
                   .values = {0.25, 0.0, 10.0, 50.0, 0.25, 1.0, 20.0, 50.0,
                              0.25, 0.0, 30.0, 50.0, 0.25, 1.0, 40.0, 50.0}},
        .bias =
            OperandInfo<float>{.type = V8MLOperandType::Enum::kFloat32,
                               .dimensions = {4},
                               .values = {-6000.0, -7000.0, 8000.0, 9000.0}},
        .expected = {0.0, 0.0, 11000.0, 9000.0}}
        .Test(scope, builder, options);
  }
  {
    // Test fused depthwise conv2d operator by setting groups to input channels,
    // nhwc input layout, ihwo filter layout, fusing with bias operand and clamp
    // activation.
    auto* options = MLConv2dOptions::Create();
    options->setInputLayout(V8MLInputOperandLayout::Enum::kNhwc);
    options->setFilterLayout(V8MLConv2dFilterOperandLayout::Enum::kIhwo);
    options->setGroups(4);
    auto* clamp_options = MLClampOptions::Create();
    clamp_options->setMinValue(0.0);
    clamp_options->setMaxValue(6.0);
    options->setActivation(
        builder->clamp(clamp_options, scope.GetExceptionState()));
    Conv2dTester<float>{
        .helper = this,
        .input = {.type = V8MLOperandType::Enum::kFloat32,
                  .dimensions = {1, 2, 2, 4},
                  .values = {10.0, 21.0, 10.0, 0.0, 10.0, 22.0, 20.0, 0.0, 10.0,
                             23.0, 30.0, 0.0, 10.0, 24.0, 40.0, 0.0}},
        .filter = {.type = V8MLOperandType::Enum::kFloat32,
                   .dimensions = {1, 2, 2, 4},
                   .values = {0.25, 0.0, 10.0, 50.0, 0.25, 1.0, 20.0, 50.0,
                              0.25, 0.0, 30.0, 50.0, 0.25, 1.0, 40.0, 50.0}},
        .bias =
            OperandInfo<float>{.type = V8MLOperandType::Enum::kFloat32,
                               .dimensions = {4},
                               .values = {-6000.0, -7000.0, 8000.0, 9000.0}},
        .expected = {0.0, 0.0, 6.0, 6.0}}
        .Test(scope, builder, options);
  }
}
template <typename T>
struct GemmTester {
MLGraphTestBase* helper;
OperandInfo<T> a;
OperandInfo<T> b;
absl::optional<OperandInfo<T>> c = absl::nullopt;
Vector<T> expected;
void Test(V8TestingScope& scope,
MLGraphBuilder* builder,
MLGemmOptions* options = MLGemmOptions::Create()) {
// Build the graph.
auto* a_operand = BuildInput(builder, "input", a.dimensions, a.type,
scope.GetExceptionState());
auto* b_operand = BuildConstant(builder, b.dimensions, b.type, b.values,
scope.GetExceptionState());
if (c) {
options->setC(BuildConstant(builder, c.value().dimensions, c.value().type,
c.value().values, scope.GetExceptionState()));
}
auto* output_operand =
BuildGemm(scope, builder, a_operand, b_operand, options);
auto [graph, build_exception] =
helper->BuildGraph(scope, builder, {{"output", output_operand}});
EXPECT_NE(graph, nullptr);
// Compute the graph.
MLNamedArrayBufferViews inputs(
{{"input", CreateArrayBufferViewForOperand(a_operand, a.values)}});
MLNamedArrayBufferViews outputs(
{{"output", CreateArrayBufferViewForOperand(output_operand)}});
auto* compute_exception =
helper->ComputeGraph(scope, graph, inputs, outputs);
EXPECT_EQ(compute_exception, nullptr);
auto results = GetArrayBufferViewValues<T>(outputs[0].second);
}
};
// Verifies gemm with and without the c operand and with bTranspose.
// Fix: every GemmTester instance below omitted `.helper = this`, leaving the
// aggregate's |helper| member value-initialized to nullptr and crashing when
// Test() calls helper->BuildGraph().
TEST_P(MLGraphTest, GemmTest) {
  V8TestingScope scope;
  auto* builder = CreateMLGraphBuilder(scope.GetExecutionContext());
  {
    // Test gemm operator without operand c.
    GemmTester<float>{.helper = this,
                      .a = {.type = V8MLOperandType::Enum::kFloat32,
                            .dimensions = {2, 2},
                            .values = {1.0, 2.0, 2.0, 1.0}},
                      .b = {.type = V8MLOperandType::Enum::kFloat32,
                            .dimensions = {2, 1},
                            .values = {2.0, 4.0}},
                      .expected = {10.0, 8.0}}
        .Test(scope, builder);
  }
  {
    // Test gemm operator with operand c.
    GemmTester<float>{
        .helper = this,
        .a = {.type = V8MLOperandType::Enum::kFloat32,
              .dimensions = {2, 2},
              .values = {1.0, 2.0, 2.0, 1.0}},
        .b = {.type = V8MLOperandType::Enum::kFloat32,
              .dimensions = {2, 1},
              .values = {2.0, 4.0}},
        .c = OperandInfo<float>{.type = V8MLOperandType::Enum::kFloat32,
                                .dimensions = {1},
                                .values = {1.0}},
        .expected = {11.0, 9.0}}
        .Test(scope, builder);
  }
  {
    // Test gemm operator with bTranspose = true.
    auto* options = MLGemmOptions::Create();
    options->setBTranspose(true);
    GemmTester<float>{
        .helper = this,
        .a = {.type = V8MLOperandType::Enum::kFloat32,
              .dimensions = {2, 2},
              .values = {1.0, 2.0, 2.0, 1.0}},
        .b = {.type = V8MLOperandType::Enum::kFloat32,
              .dimensions = {1, 2},
              .values = {2.0, 4.0}},
        .c = OperandInfo<float>{.type = V8MLOperandType::Enum::kFloat32,
                                .dimensions = {1},
                                .values = {1.0}},
        .expected = {11.0, 9.0}}
        .Test(scope, builder, options);
  }
}
// Builds a graph with a single hardSwish operator, computes it, and verifies
// the output against |expected| element-wise with EXPECT_FLOAT_EQ (exact
// equality is not used because hardSwish involves non-trivial float
// arithmetic).
struct HardSwishTester {
  // Test fixture used for BuildGraph/ComputeGraph; must be non-null.
  MLGraphTestBase* helper;
  OperandInfo<float> input;
  Vector<float> expected;

  void Test(V8TestingScope& scope) {
    // Build the graph.
    auto* builder = CreateMLGraphBuilder(scope.GetExecutionContext());
    auto* input_operand = BuildInput(builder, "input", input.dimensions,
                                     input.type, scope.GetExceptionState());
    auto* output_operand =
        builder->hardSwish(input_operand, scope.GetExceptionState());
    auto [graph, build_exception] =
        helper->BuildGraph(scope, builder, {{"output", output_operand}});
    EXPECT_NE(graph, nullptr);
    // Compute the graph.
    MLNamedArrayBufferViews inputs(
        {{"input",
          CreateArrayBufferViewForOperand(input_operand, input.values)}});
    MLNamedArrayBufferViews outputs(
        {{"output", CreateArrayBufferViewForOperand(output_operand)}});
    auto* compute_exception =
        helper->ComputeGraph(scope, graph, inputs, outputs);
    EXPECT_EQ(compute_exception, nullptr);
    // Compare element-wise with float tolerance.
    auto results = GetArrayBufferViewValues<float>(outputs[0].second);
    EXPECT_EQ(results.size(), expected.size());
    for (wtf_size_t i = 0; i < expected.size(); ++i) {
      EXPECT_FLOAT_EQ(results[i], expected[i]);
    }
  }
};
// Verifies hardSwish, y = x * max(0, min(6, x + 3)) / 6, on tensors of rank
// 1 through 4.
TEST_P(MLGraphTest, HardSwishTest) {
  V8TestingScope scope;
  {
    // Test hardSwish operator for 1-D tensor.
    // The expected results should be the result of the nonlinear function, y =
    // x * max(0, min(6, (x + 3))) / 6, applied to the input tensor,
    // element-wise.
    HardSwishTester{.helper = this,
                    .input = {.type = V8MLOperandType::Enum::kFloat32,
                              .dimensions = {2},
                              .values = {-0.6, 0.6}},
                    .expected = {-0.24, 0.36}}
        .Test(scope);
  }
  {
    // Test hardSwish operator for 2-D tensor.
    HardSwishTester{.helper = this,
                    .input = {.type = V8MLOperandType::Enum::kFloat32,
                              .dimensions = {2, 2},
                              .values = {-1.2, -0.6, 0.6, 1.2}},
                    .expected = {-0.36, -0.24, 0.36, 0.84}}
        .Test(scope);
  }
  {
    // Test hardSwish operator for 3-D tensor.
    HardSwishTester{.helper = this,
                    .input = {.type = V8MLOperandType::Enum::kFloat32,
                              .dimensions = {1, 2, 2},
                              .values = {-1.2, -0.6, 0.6, 1.2}},
                    .expected = {-0.36, -0.24, 0.36, 0.84}}
        .Test(scope);
  }
  {
    // Test hardSwish operator for 4-D tensor.
    HardSwishTester{.helper = this,
                    .input = {.type = V8MLOperandType::Enum::kFloat32,
                              .dimensions = {1, 2, 2, 1},
                              .values = {-1.2, -0.6, 0.6, 1.2}},
                    .expected = {-0.36, -0.24, 0.36, 0.84}}
        .Test(scope);
  }
}
// Builds a graph with a single pool2d operator of |kind| (average or max)
// configured by |options|, computes it, and verifies the output against
// |expected|.
template <typename T>
struct Pool2dTester {
  // Test fixture used for BuildGraph/ComputeGraph; must be non-null.
  MLGraphTestBase* helper;
  Pool2dKind kind;
  OperandInfo<T> input;
  Vector<T> expected;

  void Test(V8TestingScope& scope,
            MLPool2dOptions* options = MLPool2dOptions::Create()) {
    // Build the graph.
    auto* builder = CreateMLGraphBuilder(scope.GetExecutionContext());
    auto* input_operand = BuildInput(builder, "input", input.dimensions,
                                     input.type, scope.GetExceptionState());
    auto* output_operand =
        BuildPool2d(scope, builder, kind, input_operand, options);
    auto [graph, build_exception] =
        helper->BuildGraph(scope, builder, {{"output", output_operand}});
    EXPECT_NE(graph, nullptr);
    // Compute the graph and compare against the expected values.
    MLNamedArrayBufferViews inputs(
        {{"input",
          CreateArrayBufferViewForOperand(input_operand, input.values)}});
    MLNamedArrayBufferViews outputs(
        {{"output", CreateArrayBufferViewForOperand(output_operand)}});
    auto* compute_exception =
        helper->ComputeGraph(scope, graph, inputs, outputs);
    EXPECT_EQ(compute_exception, nullptr);
    auto results = GetArrayBufferViewValues<T>(outputs[0].second);
    EXPECT_EQ(results, expected);
  }
};
// Verifies averagePool2d (windowed and global) and maxPool2d.
// Fix: every Pool2dTester instance below omitted `.helper = this`, leaving
// the aggregate's |helper| member value-initialized to nullptr and crashing
// when Test() calls helper->BuildGraph().
TEST_P(MLGraphTest, Pool2dTest) {
  V8TestingScope scope;
  {
    // Test averagePool2d operator for nhwc input layout.
    auto* options = MLPool2dOptions::Create();
    options->setLayout(V8MLInputOperandLayout::Enum::kNhwc);
    options->setWindowDimensions({3, 3});
    Pool2dTester<float>{
        .helper = this,
        .kind = Pool2dKind::kAverage,
        .input = {.type = V8MLOperandType::Enum::kFloat32,
                  .dimensions = {1, 4, 4, 1},
                  .values = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
                             11.0, 12.0, 13.0, 14.0, 15.0, 16.0}},
        .expected = {6.0, 7.0, 10.0, 11.0}}
        .Test(scope, options);
  }
  {
    // Test global averagePool2d operator for nhwc input layout.
    auto* options = MLPool2dOptions::Create();
    options->setLayout(V8MLInputOperandLayout::Enum::kNhwc);
    Pool2dTester<float>{
        .helper = this,
        .kind = Pool2dKind::kAverage,
        .input = {.type = V8MLOperandType::Enum::kFloat32,
                  .dimensions = {1, 4, 4, 1},
                  .values = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
                             11.0, 12.0, 13.0, 14.0, 15.0, 16.0}},
        .expected = {8.5}}
        .Test(scope, options);
  }
  {
    // Test maxPool2d operator for nhwc input layout.
    auto* options = MLPool2dOptions::Create();
    options->setLayout(V8MLInputOperandLayout::Enum::kNhwc);
    options->setWindowDimensions({3, 3});
    Pool2dTester<float>{
        .helper = this,
        .kind = Pool2dKind::kMax,
        .input = {.type = V8MLOperandType::Enum::kFloat32,
                  .dimensions = {1, 4, 4, 1},
                  .values = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
                             11.0, 12.0, 13.0, 14.0, 15.0, 16.0}},
        .expected = {11.0, 12.0, 15.0, 16.0}}
        .Test(scope, options);
  }
}
// Because reshape Node runs copy operator, ReshapeTester just checks the output
// against the input. So there is no need to set expected results.
// It also checks that the builder infers |expected_output_shape| from
// |new_shape| (a nullopt entry means that dimension is computed).
template <typename T>
struct ReshapeTester {
  // Test fixture used for BuildGraph/ComputeGraph; must be non-null.
  MLGraphTestBase* helper;
  OperandInfo<T> input;
  Vector<absl::optional<uint32_t>> new_shape;
  Vector<uint32_t> expected_output_shape;

  void Test(V8TestingScope& scope) {
    // Build the graph.
    auto* builder = CreateMLGraphBuilder(scope.GetExecutionContext());
    auto* input_operand = BuildInput(builder, "input", input.dimensions,
                                     input.type, scope.GetExceptionState());
    auto* output_operand =
        builder->reshape(input_operand, new_shape, scope.GetExceptionState());
    // The output shape is inferred at build time, before computing.
    EXPECT_EQ(output_operand->Dimensions(), expected_output_shape);
    auto [graph, build_exception] =
        helper->BuildGraph(scope, builder, {{"output", output_operand}});
    EXPECT_NE(graph, nullptr);
    // Compute the graph.
    MLNamedArrayBufferViews inputs(
        {{"input",
          CreateArrayBufferViewForOperand(input_operand, input.values)}});
    MLNamedArrayBufferViews outputs(
        {{"output", CreateArrayBufferViewForOperand(output_operand)}});
    auto* compute_exception =
        helper->ComputeGraph(scope, graph, inputs, outputs);
    EXPECT_EQ(compute_exception, nullptr);
    // Reshape copies the data, so the output equals the input values.
    auto results = GetArrayBufferViewValues<T>(outputs[0].second);
    EXPECT_EQ(results, input.values);
  }
};
// Verifies reshape with explicit target shapes and with one inferred
// (nullopt) dimension.
TEST_P(MLGraphTest, ReshapeTest) {
  V8TestingScope scope;
  {
    // Test reshaping 2-D tensor to 1-D tensor.
    ReshapeTester<float>{.helper = this,
                         .input = {.type = V8MLOperandType::Enum::kFloat32,
                                   .dimensions = {2, 2},
                                   .values = {-10.0, -0.5, 0.5, 10.0}},
                         .new_shape = {4},
                         .expected_output_shape = {4}}
        .Test(scope);
  }
  {
    // Test reshaping from 2-D tensor to 1-D tensor with calculated dimension.
    ReshapeTester<float>{.helper = this,
                         .input = {.type = V8MLOperandType::Enum::kFloat32,
                                   .dimensions = {2, 2},
                                   .values = {-10.0, -0.5, 0.5, 10.0}},
                         .new_shape = {absl::nullopt},
                         .expected_output_shape = {4}}
        .Test(scope);
  }
  {
    // Test reshaping from 4-D tensor to 2-D tensor.
    ReshapeTester<float>{.helper = this,
                         .input = {.type = V8MLOperandType::Enum::kFloat32,
                                   .dimensions = {1, 2, 2, 1},
                                   .values = {-10.0, -0.5, 0.5, 10.0}},
                         .new_shape = {1, 4},
                         .expected_output_shape = {1, 4}}
        .Test(scope);
  }
  {
    // Test reshaping from 4-D tensor to 2-D tensor with calculated dimension.
    ReshapeTester<float>{.helper = this,
                         .input = {.type = V8MLOperandType::Enum::kFloat32,
                                   .dimensions = {1, 2, 2, 1},
                                   .values = {-10.0, -0.5, 0.5, 10.0}},
                         .new_shape = {1, absl::nullopt},
                         .expected_output_shape = {1, 4}}
        .Test(scope);
  }
}
// Instantiate the suite for the XNNPACK backend in both async and sync
// execution modes; TestVarietyToString produces names like "Xnnpack_Async".
INSTANTIATE_TEST_SUITE_P(
    All,
    MLGraphTest,
    testing::Combine(::testing::Values(BackendType::kXnnpack),
                     ::testing::Values(ExecutionMode::kAsync,
                                       ExecutionMode::kSync)),
    TestVarietyToString);
} // namespace blink

@ -18,21 +18,43 @@ MLGraph* ToMLGraph(V8TestingScope* scope, ScriptValue value) {
scope->GetIsolate(), value.V8Value(), scope->GetExceptionState());
}
std::string ExecutionModeParamToString(
const ::testing::TestParamInfo<ExecutionMode>& execution_mode) {
switch (execution_mode.param) {
case ExecutionMode::kAsync:
return "Async";
case ExecutionMode::kSync:
return "Sync";
// Converts a (backend, execution mode) test parameter into a human-readable
// parameterized-test name suffix, e.g. "Xnnpack_Async" or "Sync".
//
// Fixes over the previous version: the no-op `name += ""` statement is
// removed and the garbled comment about the fake backend is rewritten.
std::string TestVarietyToString(
    const ::testing::TestParamInfo<TestVariety>& info) {
  BackendType backend_type = std::get<0>(info.param);
  ExecutionMode execution_mode = std::get<1>(info.param);
  std::string name;
  switch (backend_type) {
    case BackendType::kFake:
      // Intentionally contributes no prefix: the fixture name
      // |FakeMLGraphTest| already identifies the fake backend, so adding a
      // "Fake_" prefix would duplicate it.
      break;
    case BackendType::kXnnpack:
      name += "Xnnpack_";
      break;
  }
  switch (execution_mode) {
    case ExecutionMode::kAsync:
      name += "Async";
      break;
    case ExecutionMode::kSync:
      name += "Sync";
      break;
  }
  return name;
}
// Returns the execution-mode half of the current (backend, mode) parameter.
ExecutionMode MLGraphTestBase::GetExecutionMode() {
  const TestVariety& test_variety = GetParam();
  return std::get<1>(test_variety);
}
MLGraphTestBase::BuildResult MLGraphTestBase::BuildGraph(
V8TestingScope& scope,
MLGraphBuilder* builder,
const MLNamedOperands& named_operands) {
switch (GetParam()) {
switch (GetExecutionMode()) {
case ExecutionMode::kAsync: {
ScriptPromiseTester tester(
scope.GetScriptState(),
@ -76,7 +98,7 @@ DOMException* MLGraphTestBase::ComputeGraph(V8TestingScope& scope,
MLGraph* graph,
MLNamedArrayBufferViews& inputs,
MLNamedArrayBufferViews& outputs) {
switch (GetParam()) {
switch (GetExecutionMode()) {
case ExecutionMode::kAsync: {
auto* resolver =
MakeGarbageCollected<ScriptPromiseResolver>(scope.GetScriptState());
@ -113,195 +135,4 @@ DOMException* MLGraphTestBase::ComputeGraph(V8TestingScope& scope,
}
}
template <typename T>
struct ElementWiseBinaryTester {
MLGraphTestBase* helper;
ElementWiseBinaryKind kind;
OperandInfo<T> lhs;
OperandInfo<T> rhs;
Vector<T> expected;
void Test(V8TestingScope& scope) {
// Build the graph.
auto* builder = CreateMLGraphBuilder(scope.GetExecutionContext());
auto* lhs_operand = BuildInput(builder, "lhs", lhs.dimensions, lhs.type,
scope.GetExceptionState());
auto* rhs_operand = BuildInput(builder, "rhs", rhs.dimensions, rhs.type,
scope.GetExceptionState());
auto* output_operand =
BuildElementWiseBinary(scope, builder, kind, lhs_operand, rhs_operand);
auto [graph, build_exception] =
helper->BuildGraph(scope, builder, {{"output", output_operand}});
EXPECT_NE(graph, nullptr);
// Compute the graph.
MLNamedArrayBufferViews inputs(
{{"lhs", CreateArrayBufferViewForOperand(lhs_operand, lhs.values)},
{"rhs", CreateArrayBufferViewForOperand(rhs_operand, rhs.values)}});
MLNamedArrayBufferViews outputs(
{{"output", CreateArrayBufferViewForOperand(output_operand)}});
auto* compute_exception =
helper->ComputeGraph(scope, graph, inputs, outputs);
EXPECT_EQ(compute_exception, nullptr);
auto results = GetArrayBufferViewValues<T>(outputs[0].second);
EXPECT_EQ(results, expected);
}
};
// Runs the shared element-wise binary operator cases (add, sub, mul, div,
// min, max) against the backend under test, covering 1-D through 4-D
// tensors and broadcasting.
//
// Fix: the "two 4-D tensors" add case was missing `.helper = this`, which
// left ElementWiseBinaryTester::helper value-initialized to nullptr and
// crashed on `helper->BuildGraph(...)`.
void MLGraphTestBase::TestElementWiseBinary(V8TestingScope& scope) {
  {
    // Test element-wise add operator for two 1-D tensors.
    // The expected results should be the sum of the values of the two input
    // tensors, element-wise.
    ElementWiseBinaryTester<float>{
        .helper = this,
        .kind = ElementWiseBinaryKind::kAdd,
        .lhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {2},
                .values = {1.0, 2.0}},
        .rhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {2},
                .values = {3.0, 4.0}},
        .expected = {4.0, 6.0}}
        .Test(scope);
  }
  {
    // Test element-wise add operator for two 2-D tensors.
    ElementWiseBinaryTester<float>{
        .helper = this,
        .kind = ElementWiseBinaryKind::kAdd,
        .lhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {2, 2},
                .values = {1.0, 2.0, 3.0, 4.0}},
        .rhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {2, 2},
                .values = {5.0, 6.0, 7.0, 8.0}},
        .expected = {6.0, 8.0, 10.0, 12.0}}
        .Test(scope);
  }
  {
    // Test element-wise add operator for 1-D tensor broadcasting to 2-D
    // tensor.
    ElementWiseBinaryTester<float>{
        .helper = this,
        .kind = ElementWiseBinaryKind::kAdd,
        .lhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {2, 2},
                .values = {1.0, 2.0, 3.0, 4.0}},
        .rhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {2},
                .values = {5.0, 6.0}},
        .expected = {6.0, 8.0, 8.0, 10.0}}
        .Test(scope);
  }
  {
    // Test element-wise add operator for 3-D tensor broadcasting to 3-D
    // tensor.
    ElementWiseBinaryTester<float>{
        .helper = this,
        .kind = ElementWiseBinaryKind::kAdd,
        .lhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2},
                .values = {1.0, 2.0, 3.0, 4.0}},
        .rhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {2, 1, 2},
                .values = {5.0, 6.0, 7.0, 8.0}},
        .expected = {6.0, 8.0, 8.0, 10.0, 8.0, 10.0, 10.0, 12.0}}
        .Test(scope);
  }
  {
    // Test element-wise add operator for two 4-D tensors.
    ElementWiseBinaryTester<float>{
        .helper = this,
        .kind = ElementWiseBinaryKind::kAdd,
        .lhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2, 1},
                .values = {1.0, 2.0, 3.0, 4.0}},
        .rhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2, 1},
                .values = {5.0, 6.0, 7.0, 8.0}},
        .expected = {6.0, 8.0, 10.0, 12.0}}
        .Test(scope);
  }
  {
    // Test element-wise sub operator for two 4-D tensors.
    // The expected results should be the difference of the values of the two
    // input tensors, element-wise.
    ElementWiseBinaryTester<float>{
        .helper = this,
        .kind = ElementWiseBinaryKind::kSub,
        .lhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2, 1},
                .values = {1.0, 2.0, 3.0, 4.0}},
        .rhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2, 1},
                .values = {5.0, 6.0, 7.0, 8.0}},
        .expected = {-4.0, -4.0, -4.0, -4.0}}
        .Test(scope);
  }
  {
    // Test element-wise mul operator for two 4-D tensors.
    // The expected results should be the product of the values of the two
    // input tensors, element-wise.
    ElementWiseBinaryTester<float>{
        .helper = this,
        .kind = ElementWiseBinaryKind::kMul,
        .lhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2, 1},
                .values = {1.0, 2.0, 3.0, 4.0}},
        .rhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2, 1},
                .values = {5.0, 6.0, 7.0, 8.0}},
        .expected = {5.0, 12.0, 21.0, 32.0}}
        .Test(scope);
  }
  {
    // Test element-wise div operator for two 4-D tensors.
    // The expected results should be the quotient of the values of the two
    // input tensors, element-wise.
    ElementWiseBinaryTester<float>{
        .helper = this,
        .kind = ElementWiseBinaryKind::kDiv,
        .lhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2, 1},
                .values = {3.0, 4.0, 6.0, 8.0}},
        .rhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2, 1},
                .values = {1.0, 2.0, 2.0, 2.0}},
        .expected = {3.0, 2.0, 3.0, 4.0}}
        .Test(scope);
  }
  {
    // Test element-wise min operator for two 4-D tensors.
    // The expected results should be the lesser values of the two input
    // tensors, element-wise.
    ElementWiseBinaryTester<float>{
        .helper = this,
        .kind = ElementWiseBinaryKind::kMin,
        .lhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2, 1},
                .values = {1.0, 4.0, 5.0, 8.0}},
        .rhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2, 1},
                .values = {2.0, 3.0, 6.0, 7.0}},
        .expected = {1.0, 3.0, 5.0, 7.0}}
        .Test(scope);
  }
  {
    // Test element-wise max operator for two 4-D tensors.
    // The expected results should be the greater values of the two input
    // tensors, element-wise.
    ElementWiseBinaryTester<float>{
        .helper = this,
        .kind = ElementWiseBinaryKind::kMax,
        .lhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2, 1},
                .values = {1.0, 4.0, 5.0, 8.0}},
        .rhs = {.type = V8MLOperandType::Enum::kFloat32,
                .dimensions = {1, 2, 2, 1},
                .values = {2.0, 3.0, 6.0, 7.0}},
        .expected = {2.0, 4.0, 6.0, 8.0}}
        .Test(scope);
  }
}
} // namespace blink

@ -21,12 +21,16 @@ class V8TestingScope;
// The utility methods for graph test.
// How a graph is built and computed: via the promise-based async entry
// points or the synchronous ones.
enum ExecutionMode { kAsync, kSync };
// The backends share the unit tests in the MLGraphTest.
enum BackendType { kFake, kXnnpack };
// NOTE(review): this declaration appears superseded by TestVarietyToString
// below — confirm whether it still has callers.
std::string ExecutionModeParamToString(
    const ::testing::TestParamInfo<ExecutionMode>& execution_mode);
// A single test parameter: which backend to run against and in which
// execution mode.
using TestVariety = std::tuple<BackendType, ExecutionMode>;
// Maps a TestVariety to a readable parameterized-test name suffix, e.g.
// "Xnnpack_Async".
std::string TestVarietyToString(
    const ::testing::TestParamInfo<TestVariety>& info);
class MLGraphTestBase : public ::testing::Test,
public ::testing::WithParamInterface<ExecutionMode> {
public ::testing::WithParamInterface<TestVariety> {
public:
// BuildResult is returned by Build() method. Only one member of BuildResult
// is valid. If the graph building is successful, graph points to the MLGraph
@ -37,6 +41,8 @@ class MLGraphTestBase : public ::testing::Test,
Persistent<DOMException> exception;
};
ExecutionMode GetExecutionMode();
// Helper method for testing both BuildAsyncImpl() and BuildSyncImpl() with
// the same named operands and expected results.
BuildResult BuildGraph(V8TestingScope& scope,
@ -52,11 +58,6 @@ class MLGraphTestBase : public ::testing::Test,
MLGraph* graph,
MLNamedArrayBufferViews& inputs,
MLNamedArrayBufferViews& outputs);
// Test operations with different parameters such as tensor dimensions, data
// layout. Each test case will builds a graph and computes it with input data
// to check the expected value.
void TestElementWiseBinary(V8TestingScope& scope);
};
template <typename T>

@ -0,0 +1,164 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/blink/renderer/platform/wtf/vector.h"
#include "third_party/tflite/src/tensorflow/lite/kernels/builtin_op_kernels.h"
#include "third_party/tflite/src/tensorflow/lite/model.h"
#include "third_party/tflite/src/tensorflow/lite/mutable_op_resolver.h"
#include "third_party/tflite/src/tensorflow/lite/schema/schema_generated.h"
namespace blink {
// The version number of the Schema. Ideally all changes will be backward
// compatible. If that ever changes, we must ensure that version is the first
// entry in the new tflite root so that we can see that version is not 1.
#define TFLITE_SCHEMA_VERSION (3)
// This class maintains all the currently supported TFLite
// operations for the Chromium build of TFLite and registers them for use.
// Registers the builtin TFLite operations these tests rely on (currently
// only ADD) with a MutableOpResolver so the interpreter can resolve them.
class TFLiteOpResolver : public tflite::MutableOpResolver {
 public:
  TFLiteOpResolver() {
    // Supported schema versions of the ADD builtin.
    constexpr int kMinVersion = 1;
    constexpr int kMaxVersion = 2;
    AddBuiltin(tflite::BuiltinOperator_ADD,
               tflite::ops::builtin::Register_ADD(), kMinVersion, kMaxVersion);
  }
};
// Helper function to get the data of result into a vector.
// Reinterprets the raw bytes of |result| as a vector of T values.
template <typename T>
Vector<T> GetResult(const WTF::Vector<uint8_t>& result) {
  const wtf_size_t element_count =
      base::checked_cast<wtf_size_t>(result.size() / sizeof(T));
  Vector<T> values(element_count);
  memcpy(values.data(), result.data(), result.size());
  return values;
}
// Describes one tensor operand of a TFLite test model: its element type,
// shape, and the flattened values used to fill its backing buffer.
template <typename T>
struct OperandInfo {
  tflite::TensorType type;     // Element type, e.g. TensorType_FLOAT32.
  Vector<int32_t> dimensions;  // Tensor shape.
  Vector<T> values;            // Flattened tensor data.
};
// Builds a one-operator TFLite model in memory with FlatBuffers, runs it
// through the TFLite interpreter, and checks the output against |expected|.
// Both operands are baked into the model as constant buffers, so the
// subgraph has no runtime inputs. Tensor/buffer indices below are fixed by
// construction order and must stay in sync with each other.
template <typename T>
struct ElementWiseBinaryTester {
  tflite::BuiltinOperator kind;  // The binary builtin to test, e.g. ADD.
  OperandInfo<T> lhs;
  OperandInfo<T> rhs;
  Vector<T> expected;
  void Test() {
    flatbuffers::FlatBufferBuilder builder;
    // It is required that the first entry in the buffers of model is always an
    // empty buffer. This is so that the default buffer index of zero in Tensor
    // will always refer to a valid empty buffer.
    Vector<flatbuffers::Offset<tflite::Buffer>> buffers = {
        tflite::CreateBuffer(builder, builder.CreateVector({})),
    };
    // Create tflite |Buffer| for first input tensor.
    buffers.push_back(tflite::CreateBuffer(
        builder, builder.CreateVector(
                     reinterpret_cast<const uint8_t*>(lhs.values.data()),
                     sizeof(T) * lhs.values.size())));
    // Create tflite |Buffer| for second input tensor.
    buffers.push_back(tflite::CreateBuffer(
        builder, builder.CreateVector(
                     reinterpret_cast<const uint8_t*>(rhs.values.data()),
                     sizeof(T) * rhs.values.size())));
    // A list of all tflite |Tensor| used in this model.
    Vector<flatbuffers::Offset<tflite::Tensor>> tensors;
    // Create tflite |Tensor| for first input tensor.
    uint32_t lhs_buffer_index = 1;
    tensors.emplace_back(tflite::CreateTensor(
        builder, builder.CreateVector<int32_t>(lhs.dimensions), lhs.type,
        lhs_buffer_index));
    // Create tflite |Tensor| for second input tensor.
    uint32_t rhs_buffer_index = 2;
    tensors.emplace_back(tflite::CreateTensor(
        builder, builder.CreateVector<int32_t>(rhs.dimensions), rhs.type,
        rhs_buffer_index));
    // Create tflite |Tensor| for output tensor.
    // The output reuses the lhs shape, so broadcasting is not supported here.
    DCHECK(lhs.dimensions == rhs.dimensions);
    tensors.emplace_back(tflite::CreateTensor(
        builder, builder.CreateVector<int32_t>(lhs.dimensions), lhs.type));
    // A list of all tflite |Operator| used in this model.
    Vector<flatbuffers::Offset<tflite::Operator>> operators;
    int32_t lhs_tensor_index = 0, rhs_tensor_index = 1, output_tensor_index = 2;
    Vector<int32_t> op_inputs = {lhs_tensor_index, rhs_tensor_index};
    Vector<int32_t> op_outputs = {output_tensor_index};
    operators.emplace_back(tflite::CreateOperator(
        builder, 0, builder.CreateVector<int32_t>(op_inputs),
        builder.CreateVector<int32_t>(op_outputs)));
    // Create subgraph in the model. The subgraph input list is empty because
    // both operands come from constant buffers.
    Vector<int32_t> subgraph_outputs = {output_tensor_index};
    flatbuffers::Offset<tflite::SubGraph> subgraph = tflite::CreateSubGraph(
        builder, builder.CreateVector(tensors.data(), tensors.size()),
        builder.CreateVector<int32_t>({}),
        builder.CreateVector<int32_t>(subgraph_outputs),
        builder.CreateVector(operators.data(), operators.size()));
    flatbuffers::Offset<flatbuffers::String> description =
        builder.CreateString("ElementWise Binary model");
    Vector<flatbuffers::Offset<tflite::OperatorCode>> operator_codes = {
        {tflite::CreateOperatorCode(builder, kind)}};
    flatbuffers::Offset<tflite::Model> model_buffer = tflite::CreateModel(
        builder, TFLITE_SCHEMA_VERSION,
        builder.CreateVector(operator_codes.data(), operator_codes.size()),
        builder.CreateVector(&subgraph, 1), description,
        builder.CreateVector(buffers.data(), buffers.size()));
    tflite::FinishModelBuffer(builder, model_buffer);
    // Compute the graph.
    std::unique_ptr<tflite::Interpreter> interpreter;
    const tflite::Model* model = tflite::GetModel(builder.GetBufferPointer());
    EXPECT_NE(model, nullptr);
    TFLiteOpResolver op_resolver;
    EXPECT_EQ(tflite::InterpreterBuilder(model, op_resolver)(&interpreter),
              kTfLiteOk);
    EXPECT_NE(interpreter, nullptr);
    EXPECT_EQ(interpreter->AllocateTensors(), kTfLiteOk);
    EXPECT_EQ(interpreter->Invoke(), kTfLiteOk);
    // Get output data after computing the model.
    EXPECT_EQ(interpreter->outputs().size(), 1u);
    auto* tensor = interpreter->tensor(output_tensor_index);
    WTF::Vector<uint8_t> output_data(static_cast<wtf_size_t>(tensor->bytes));
    memcpy(output_data.data(), tensor->data.raw, tensor->bytes);
    auto results = GetResult<T>(output_data);
    EXPECT_EQ(results, expected);
  }
};
// Test fixture for TFLite-backed graph tests; holds no per-test state.
class MLGraphTestTfLite : public testing::Test {
 public:
  MLGraphTestTfLite() = default;
  ~MLGraphTestTfLite() override = default;
};
TEST_F(MLGraphTestTfLite, ElementWiseAddTest) {
  // Element-wise add of two 1-D tensors: every output element is the sum of
  // the corresponding input elements.
  ElementWiseBinaryTester<float> add_tester{
      .kind = tflite::BuiltinOperator_ADD,
      .lhs = {.type = tflite::TensorType_FLOAT32,
              .dimensions = {2},
              .values = {1.0, 2.0}},
      .rhs = {.type = tflite::TensorType_FLOAT32,
              .dimensions = {2},
              .values = {3.0, 4.0}},
      .expected = {4.0, 6.0}};
  add_tester.Test();
}
} // namespace blink

@ -4,30 +4,20 @@
#include "third_party/blink/renderer/modules/ml/webnn/ml_graph_builder_test.h"
#include <numeric>
#include "base/system/sys_info.h"
#include "third_party/blink/renderer/bindings/core/v8/native_value_traits_impl.h"
#include "third_party/blink/renderer/bindings/core/v8/script_promise_resolver.h"
#include "third_party/blink/renderer/bindings/core/v8/script_promise_tester.h"
#include "third_party/blink/renderer/bindings/core/v8/v8_binding_for_testing.h"
#include "third_party/blink/renderer/bindings/core/v8/v8_dom_exception.h"
#include "third_party/blink/renderer/bindings/modules/v8/v8_ml_clamp_options.h"
#include "third_party/blink/renderer/bindings/modules/v8/v8_ml_conv_2d_options.h"
#include "third_party/blink/renderer/bindings/modules/v8/v8_ml_pool_2d_options.h"
#include "third_party/blink/renderer/bindings/modules/v8/v8_ml_resample_2d_options.h"
#include "third_party/blink/renderer/core/dom/dom_exception.h"
#include "third_party/blink/renderer/core/execution_context/execution_context.h"
#include "third_party/blink/renderer/modules/ml/ml.h"
#include "third_party/blink/renderer/modules/ml/ml_context.h"
#include "third_party/blink/renderer/modules/ml/webnn/ml_graph.h"
#include "third_party/blink/renderer/modules/ml/webnn/ml_graph_builder.h"
#include "third_party/blink/renderer/modules/ml/webnn/ml_graph_builder_utils.h"
#include "third_party/blink/renderer/modules/ml/webnn/ml_graph_test_base.h"
#include "third_party/blink/renderer/modules/ml/webnn/ml_graph_xnnpack.h"
#include "third_party/blink/renderer/modules/ml/webnn/ml_operand.h"
#include "third_party/blink/renderer/modules/ml/webnn/ml_operator.h"
#include "third_party/blink/renderer/platform/heap/thread_state.h"
#include "third_party/blink/renderer/platform/testing/unit_test_helpers.h"
namespace blink {
@ -292,150 +282,6 @@ TEST_P(MLGraphXnnpackTest, DefineXnnpackValuesTest) {
}
}
// Runs the shared element-wise binary operator cases against this backend.
TEST_P(MLGraphXnnpackTest, ElementWiseBinaryTest) {
  V8TestingScope scope;
  TestElementWiseBinary(scope);
}
// Builds a graph computing `output = relu(input)`, runs it through |helper|
// with the |input| values and checks the output against |expected|.
template <typename T>
struct ReluTester {
  MLGraphXnnpackTest* helper;  // The fixture providing build/compute helpers.
  OperandInfo<T> input;
  Vector<T> expected;
  void Test(V8TestingScope& scope) {
    // Build the graph.
    auto* builder = CreateMLGraphBuilder(scope.GetExecutionContext());
    auto* input_operand = BuildInput(builder, "input", input.dimensions,
                                     input.type, scope.GetExceptionState());
    auto* output_operand =
        builder->relu(input_operand, scope.GetExceptionState());
    auto [graph, build_exception] =
        helper->BuildGraph(scope, builder, {{"output", output_operand}});
    EXPECT_NE(graph, nullptr);
    // Compute the graph.
    MLNamedArrayBufferViews inputs(
        {{"input",
          CreateArrayBufferViewForOperand(input_operand, input.values)}});
    MLNamedArrayBufferViews outputs(
        {{"output", CreateArrayBufferViewForOperand(output_operand)}});
    auto* compute_exception =
        helper->ComputeGraph(scope, graph, inputs, outputs);
    EXPECT_EQ(compute_exception, nullptr);
    auto results = GetArrayBufferViewValues<T>(outputs[0].second);
    EXPECT_EQ(results, expected);
  }
};
// Verifies the relu operator end to end on tensors of rank 1 through 4; the
// same four values are used so all cases share one expected clamping result.
TEST_P(MLGraphXnnpackTest, ReluTest) {
  V8TestingScope scope;
  {
    // Test relu operator for 1-D tensor.
    // The expected results should be the result of the rectified linear
    // function, y = max(0, x), applied to the input tensor, element-wise.
    ReluTester<float>{.helper = this,
                      .input = {.type = V8MLOperandType::Enum::kFloat32,
                                .dimensions = {2},
                                .values = {-1.0, 1.0}},
                      .expected = {0.0, 1.0}}
        .Test(scope);
  }
  {
    // Test relu operator for 2-D tensor.
    ReluTester<float>{.helper = this,
                      .input = {.type = V8MLOperandType::Enum::kFloat32,
                                .dimensions = {2, 2},
                                .values = {-10.0, -0.5, 0.5, 10.0}},
                      .expected = {0.0, 0.0, 0.5, 10.0}}
        .Test(scope);
  }
  {
    // Test relu operator for 3-D tensor.
    ReluTester<float>{.helper = this,
                      .input = {.type = V8MLOperandType::Enum::kFloat32,
                                .dimensions = {1, 2, 2},
                                .values = {-10.0, -0.5, 0.5, 10.0}},
                      .expected = {0.0, 0.0, 0.5, 10.0}}
        .Test(scope);
  }
  {
    // Test relu operator for 4-D tensor.
    ReluTester<float>{.helper = this,
                      .input = {.type = V8MLOperandType::Enum::kFloat32,
                                .dimensions = {1, 2, 2, 1},
                                .values = {-10.0, -0.5, 0.5, 10.0}},
                      .expected = {0.0, 0.0, 0.5, 10.0}}
        .Test(scope);
  }
}
// Builds a graph computing `output = resample2d(input, options)`, runs it
// through |helper| with the |input| values and checks the output against
// |expected|.
template <typename T>
struct Resample2dTester {
  MLGraphXnnpackTest* helper;  // The fixture providing build/compute helpers.
  OperandInfo<T> input;
  Vector<T> expected;
  void Test(V8TestingScope& scope,
            MLResample2dOptions* options = MLResample2dOptions::Create()) {
    // Build the graph.
    auto* builder = CreateMLGraphBuilder(scope.GetExecutionContext())
    auto* input_operand = BuildInput(builder, "input", input.dimensions,
                                     input.type, scope.GetExceptionState());
    auto* output_operand =
        BuildResample2d(scope, builder, input_operand, options);
    auto [graph, build_exception] =
        helper->BuildGraph(scope, builder, {{"output", output_operand}});
    EXPECT_NE(graph, nullptr);
    // Compute the graph.
    MLNamedArrayBufferViews inputs(
        {{"input",
          CreateArrayBufferViewForOperand(input_operand, input.values)}});
    MLNamedArrayBufferViews outputs(
        {{"output", CreateArrayBufferViewForOperand(output_operand)}});
    auto* compute_exception =
        helper->ComputeGraph(scope, graph, inputs, outputs);
    EXPECT_EQ(compute_exception, nullptr);
    auto results = GetArrayBufferViewValues<T>(outputs[0].second);
    EXPECT_EQ(results, expected);
  }
};
// Verifies resample2d with linear interpolation, upscaling a 2x2 spatial
// tensor to 4x4 once via explicit |sizes| and once via |scales|; both
// specifications describe the same resampling and share one expected output.
TEST_P(MLGraphXnnpackTest, Resample2dTest) {
  V8TestingScope scope;
  {
    // Test resample2d operator with axes = {1, 2}, sizes = {4, 4}.
    auto* options = MLResample2dOptions::Create();
    options->setSizes({4, 4});
    options->setAxes({1, 2});
    options->setMode(V8MLInterpolationMode::Enum::kLinear);
    Resample2dTester<float>{
        .helper = this,
        .input = {.type = V8MLOperandType::Enum::kFloat32,
                  .dimensions = {1, 2, 2, 1},
                  .values = {1, 2, 3, 4}},
        .expected = {1., 1.25, 1.75, 2., 1.5, 1.75, 2.25, 2.5, 2.5, 2.75, 3.25,
                     3.5, 3., 3.25, 3.75, 4.}}
        .Test(scope, options);
  }
  {
    // Test resample2d operator with axes = {1, 2}, scales = {2.0, 2.0}.
    auto* options = MLResample2dOptions::Create();
    options->setScales({2.0, 2.0});
    options->setAxes({1, 2});
    options->setMode(V8MLInterpolationMode::Enum::kLinear);
    Resample2dTester<float>{
        .helper = this,
        .input = {.type = V8MLOperandType::Enum::kFloat32,
                  .dimensions = {1, 2, 2, 1},
                  .values = {1, 2, 3, 4}},
        .expected = {1., 1.25, 1.75, 2., 1.5, 1.75, 2.25, 2.5, 2.5, 2.75, 3.25,
                     3.5, 3., 3.25, 3.75, 4.}}
        .Test(scope, options);
  }
}
void CheckExternalValues(const MLGraphXnnpack* xnnpack_graph,
const MLNamedArrayBufferViews& inputs,
const MLNamedArrayBufferViews& outputs) {
@ -841,577 +687,6 @@ TEST_F(MLGraphXnnpackTest, ComputeAsyncTest) {
}
}
// Builds a graph computing `output = clamp(input, options)`, runs it through
// |helper| with the |input| values and checks the output against |expected|.
template <typename T>
struct ClampTester {
  MLGraphXnnpackTest* helper;  // The fixture providing build/compute helpers.
  OperandInfo<T> input;
  Vector<T> expected;
  void Test(V8TestingScope& scope,
            MLClampOptions* options = MLClampOptions::Create()) {
    // Build the graph.
    auto* builder = CreateMLGraphBuilder(scope.GetExecutionContext());
    auto* input_operand = BuildInput(builder, "input", input.dimensions,
                                     input.type, scope.GetExceptionState());
    auto* output_operand =
        builder->clamp(input_operand, options, scope.GetExceptionState());
    auto [graph, build_exception] =
        helper->BuildGraph(scope, builder, {{"output", output_operand}});
    EXPECT_NE(graph, nullptr);
    // Compute the graph.
    MLNamedArrayBufferViews inputs(
        {{"input",
          CreateArrayBufferViewForOperand(input_operand, input.values)}});
    MLNamedArrayBufferViews outputs(
        {{"output", CreateArrayBufferViewForOperand(output_operand)}});
    auto* compute_exception =
        helper->ComputeGraph(scope, graph, inputs, outputs);
    EXPECT_EQ(compute_exception, nullptr);
    auto results = GetArrayBufferViewValues<T>(outputs[0].second);
    EXPECT_EQ(results, expected);
  }
};
// Verifies the clamp operator with every combination of the optional
// min/max bounds: neither, only min, only max, and both.
TEST_P(MLGraphXnnpackTest, ClampTest) {
  V8TestingScope scope;
  {
    // Test clamp operator with default options that no minimum and maximum
    // values are defined.
    ClampTester<float>{.helper = this,
                       .input = {.type = V8MLOperandType::Enum::kFloat32,
                                 .dimensions = {1, 2, 2, 1},
                                 .values = {-10.0, -0.5, 0.5, 10.0}},
                       .expected = {-10.0, -0.5, 0.5, 10.0}}
        .Test(scope);
  }
  {
    // Test clamp operator with the minimum value defined.
    MLClampOptions* options = MLClampOptions::Create();
    options->setMinValue(0.0);
    ClampTester<float>{.helper = this,
                       .input = {.type = V8MLOperandType::Enum::kFloat32,
                                 .dimensions = {1, 2, 2, 1},
                                 .values = {-10.0, -0.5, 0.5, 10.0}},
                       .expected = {0.0, 0.0, 0.5, 10.0}}
        .Test(scope, options);
  }
  {
    // Test clamp operator with the maximum value defined.
    MLClampOptions* options = MLClampOptions::Create();
    options->setMaxValue(6.0);
    ClampTester<float>{.helper = this,
                       .input = {.type = V8MLOperandType::Enum::kFloat32,
                                 .dimensions = {1, 2, 2, 1},
                                 .values = {-10.0, -0.5, 0.5, 10.0}},
                       .expected = {-10.0, -0.5, 0.5, 6.0}}
        .Test(scope, options);
  }
  {
    // Test clamp operator with both the minimum and maximum values defined.
    MLClampOptions* options = MLClampOptions::Create();
    options->setMinValue(0.0);
    options->setMaxValue(6.0);
    ClampTester<float>{.helper = this,
                       .input = {.type = V8MLOperandType::Enum::kFloat32,
                                 .dimensions = {1, 2, 2, 1},
                                 .values = {-10.0, -0.5, 0.5, 10.0}},
                       .expected = {0.0, 0.0, 0.5, 6.0}}
        .Test(scope, options);
  }
}
// Creates a constant MLOperand of the given shape and type whose backing
// buffer is filled with |values|.
//
// Fix: the element count is now accumulated with std::multiplies<size_t>.
// The previous std::multiplies<uint32_t> converted the running size_t
// product down to 32 bits at every step, so large shapes could silently
// truncate the buffer size on 64-bit builds.
template <typename T>
MLOperand* BuildConstant(MLGraphBuilder* builder,
                         const Vector<uint32_t>& dimensions,
                         V8MLOperandType::Enum type,
                         const Vector<T>& values,
                         ExceptionState& exception_state) {
  size_t buffer_size = std::accumulate(dimensions.begin(), dimensions.end(),
                                       size_t(1), std::multiplies<size_t>());
  auto buffer = CreateDOMArrayBufferView(buffer_size, type);
  // The caller must supply exactly one value per element.
  DCHECK_EQ(buffer->byteLength(), values.size() * sizeof(T));
  memcpy(buffer->BaseAddress(), values.data(), buffer->byteLength());
  return BuildConstant(builder, dimensions, type, exception_state, buffer);
}
// Builds a graph computing `output = conv2d(input, filter, options)` with an
// optional constant bias fused in via |options|, runs it through |helper|
// and checks the output against |expected|.
template <typename T>
struct Conv2dTester {
  MLGraphXnnpackTest* helper;  // Must be set by the caller (e.g. `this`).
  OperandInfo<T> input;
  OperandInfo<T> filter;
  absl::optional<OperandInfo<T>> bias = absl::nullopt;
  Vector<T> expected;
  void Test(V8TestingScope& scope,
            MLGraphBuilder* builder,
            MLConv2dOptions* options = MLConv2dOptions::Create()) {
    // Build the graph. The filter (and bias, if any) are constants; only
    // "input" is fed at compute time.
    auto* input_operand = BuildInput(builder, "input", input.dimensions,
                                     input.type, scope.GetExceptionState());
    auto* filter_operand =
        BuildConstant(builder, filter.dimensions, filter.type, filter.values,
                      scope.GetExceptionState());
    if (bias) {
      options->setBias(BuildConstant(builder, bias.value().dimensions,
                                     bias.value().type, bias.value().values,
                                     scope.GetExceptionState()));
    }
    auto* output_operand =
        BuildConv2d(scope, builder, input_operand, filter_operand, options);
    auto [graph, build_exception] =
        helper->BuildGraph(scope, builder, {{"output", output_operand}});
    EXPECT_NE(graph, nullptr);
    // Compute the graph.
    MLNamedArrayBufferViews inputs(
        {{"input",
          CreateArrayBufferViewForOperand(input_operand, input.values)}});
    MLNamedArrayBufferViews outputs(
        {{"output", CreateArrayBufferViewForOperand(output_operand)}});
    auto* compute_exception =
        helper->ComputeGraph(scope, graph, inputs, outputs);
    EXPECT_EQ(compute_exception, nullptr);
    auto results = GetArrayBufferViewValues<T>(outputs[0].second);
    EXPECT_EQ(results, expected);
  }
};
// Verifies conv2d end to end: regular and depthwise convolutions in nhwc
// layout, with and without fused bias and relu/clamp activations.
//
// Fix: every Conv2dTester aggregate was missing `.helper = this`, which left
// Conv2dTester::helper value-initialized to nullptr and crashed on
// `helper->BuildGraph(...)`.
TEST_P(MLGraphXnnpackTest, Conv2dTest) {
  V8TestingScope scope;
  auto* builder = CreateMLGraphBuilder(scope.GetExecutionContext());
  {
    // Test conv2d operator for nhwc input layout and ohwi filter layout.
    auto* options = MLConv2dOptions::Create();
    options->setInputLayout(V8MLInputOperandLayout::Enum::kNhwc);
    options->setFilterLayout(V8MLConv2dFilterOperandLayout::Enum::kOhwi);
    Conv2dTester<float>{
        .helper = this,
        .input = {.type = V8MLOperandType::Enum::kFloat32,
                  .dimensions = {1, 2, 3, 3},
                  .values = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
                             11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0}},
        .filter = {.type = V8MLOperandType::Enum::kFloat32,
                   .dimensions = {3, 1, 1, 3},
                   .values = {1.0, 4.0, 7.0, 2.0, 5.0, 8.0, 3.0, 6.0, 9.0}},
        .expected = {30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0,
                     138.0, 171.0, 204.0, 174.0, 216.0, 258.0, 210.0, 261.0,
                     312.0}}
        .Test(scope, builder, options);
  }
  {
    // Test fused conv2d operator for nhwc input layout and ohwi filter layout,
    // fusing with bias operand and relu activation.
    auto* options = MLConv2dOptions::Create();
    options->setInputLayout(V8MLInputOperandLayout::Enum::kNhwc);
    options->setFilterLayout(V8MLConv2dFilterOperandLayout::Enum::kOhwi);
    options->setActivation(builder->relu(scope.GetExceptionState()));
    Conv2dTester<float>{
        .helper = this,
        .input = {.type = V8MLOperandType::Enum::kFloat32,
                  .dimensions = {1, 2, 3, 3},
                  .values = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
                             11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0}},
        .filter = {.type = V8MLOperandType::Enum::kFloat32,
                   .dimensions = {3, 1, 1, 3},
                   .values = {1.0, 4.0, 7.0, 2.0, 5.0, 8.0, 3.0, 6.0, 9.0}},
        .bias = OperandInfo<float>{.type = V8MLOperandType::Enum::kFloat32,
                                   .dimensions = {3},
                                   .values = {-6000.0, -7000.0, 8000.0}},
        .expected = {0.0, 0.0, 8042.0, 0.0, 0.0, 8096.0, 0.0, 0.0, 8150.0, 0.0,
                     0.0, 8204.0, 0.0, 0.0, 8258.0, 0.0, 0.0, 8312.0}}
        .Test(scope, builder, options);
  }
  {
    // Test depthwise conv2d operator by setting groups to input channels,
    // nhwc input layout, ihwo filter layout.
    auto* options = MLConv2dOptions::Create();
    options->setInputLayout(V8MLInputOperandLayout::Enum::kNhwc);
    options->setFilterLayout(V8MLConv2dFilterOperandLayout::Enum::kIhwo);
    options->setGroups(4);
    Conv2dTester<float>{
        .helper = this,
        .input = {.type = V8MLOperandType::Enum::kFloat32,
                  .dimensions = {1, 2, 2, 4},
                  .values = {10.0, 21.0, 10.0, 0.0, 10.0, 22.0, 20.0, 0.0, 10.0,
                             23.0, 30.0, 0.0, 10.0, 24.0, 40.0, 0.0}},
        .filter = {.type = V8MLOperandType::Enum::kFloat32,
                   .dimensions = {1, 2, 2, 4},
                   .values = {0.25, 0.0, 10.0, 50.0, 0.25, 1.0, 20.0, 50.0,
                              0.25, 0.0, 30.0, 50.0, 0.25, 1.0, 40.0, 50.0}},
        .expected = {10.0, 46.0, 3000.0, 0.0}}
        .Test(scope, builder, options);
  }
  {
    // Test fused depthwise conv2d operator by setting groups to input channels,
    // nhwc input layout, ihwo filter layout, fusing with bias operand and relu
    // activation.
    auto* options = MLConv2dOptions::Create();
    options->setInputLayout(V8MLInputOperandLayout::Enum::kNhwc);
    options->setFilterLayout(V8MLConv2dFilterOperandLayout::Enum::kIhwo);
    options->setGroups(4);
    options->setActivation(builder->relu(scope.GetExceptionState()));
    Conv2dTester<float>{
        .helper = this,
        .input = {.type = V8MLOperandType::Enum::kFloat32,
                  .dimensions = {1, 2, 2, 4},
                  .values = {10.0, 21.0, 10.0, 0.0, 10.0, 22.0, 20.0, 0.0, 10.0,
                             23.0, 30.0, 0.0, 10.0, 24.0, 40.0, 0.0}},
        .filter = {.type = V8MLOperandType::Enum::kFloat32,
                   .dimensions = {1, 2, 2, 4},
                   .values = {0.25, 0.0, 10.0, 50.0, 0.25, 1.0, 20.0, 50.0,
                              0.25, 0.0, 30.0, 50.0, 0.25, 1.0, 40.0, 50.0}},
        .bias =
            OperandInfo<float>{.type = V8MLOperandType::Enum::kFloat32,
                               .dimensions = {4},
                               .values = {-6000.0, -7000.0, 8000.0, 9000.0}},
        .expected = {0.0, 0.0, 11000.0, 9000.0}}
        .Test(scope, builder, options);
  }
  {
    // Test fused depthwise conv2d operator by setting groups to input channels,
    // nhwc input layout, ihwo filter layout, fusing with bias operand and clamp
    // activation.
    auto* options = MLConv2dOptions::Create();
    options->setInputLayout(V8MLInputOperandLayout::Enum::kNhwc);
    options->setFilterLayout(V8MLConv2dFilterOperandLayout::Enum::kIhwo);
    options->setGroups(4);
    auto* clamp_options = MLClampOptions::Create();
    clamp_options->setMinValue(0.0);
    clamp_options->setMaxValue(6.0);
    options->setActivation(
        builder->clamp(clamp_options, scope.GetExceptionState()));
    Conv2dTester<float>{
        .helper = this,
        .input = {.type = V8MLOperandType::Enum::kFloat32,
                  .dimensions = {1, 2, 2, 4},
                  .values = {10.0, 21.0, 10.0, 0.0, 10.0, 22.0, 20.0, 0.0, 10.0,
                             23.0, 30.0, 0.0, 10.0, 24.0, 40.0, 0.0}},
        .filter = {.type = V8MLOperandType::Enum::kFloat32,
                   .dimensions = {1, 2, 2, 4},
                   .values = {0.25, 0.0, 10.0, 50.0, 0.25, 1.0, 20.0, 50.0,
                              0.25, 0.0, 30.0, 50.0, 0.25, 1.0, 40.0, 50.0}},
        .bias =
            OperandInfo<float>{.type = V8MLOperandType::Enum::kFloat32,
                               .dimensions = {4},
                               .values = {-6000.0, -7000.0, 8000.0, 9000.0}},
        .expected = {0.0, 0.0, 6.0, 6.0}}
        .Test(scope, builder, options);
  }
}
template <typename T>
struct GemmTester {
MLGraphXnnpackTest* helper;
OperandInfo<T> a;
OperandInfo<T> b;
absl::optional<OperandInfo<T>> c = absl::nullopt;
Vector<T> expected;
void Test(V8TestingScope& scope,
MLGraphBuilder* builder,
MLGemmOptions* options = MLGemmOptions::Create()) {
// Build the graph.
auto* a_operand = BuildInput(builder, "input", a.dimensions, a.type,
scope.GetExceptionState());
auto* b_operand = BuildConstant(builder, b.dimensions, b.type, b.values,
scope.GetExceptionState());
if (c) {
options->setC(BuildConstant(builder, c.value().dimensions, c.value().type,
c.value().values, scope.GetExceptionState()));
}
auto* output_operand =
BuildGemm(scope, builder, a_operand, b_operand, options);
auto [graph, build_exception] =
helper->BuildGraph(scope, builder, {{"output", output_operand}});
EXPECT_NE(graph, nullptr);
// Compute the graph.
MLNamedArrayBufferViews inputs(
{{"input", CreateArrayBufferViewForOperand(a_operand, a.values)}});
MLNamedArrayBufferViews outputs(
{{"output", CreateArrayBufferViewForOperand(output_operand)}});
auto* compute_exception =
helper->ComputeGraph(scope, graph, inputs, outputs);
EXPECT_EQ(compute_exception, nullptr);
auto results = GetArrayBufferViewValues<T>(outputs[0].second);
}
};
TEST_P(MLGraphXnnpackTest, GemmTest) {
  V8TestingScope scope;
  auto* builder = CreateMLGraphBuilder(scope.GetExecutionContext());
  {
    // Test gemm operator without operand c.
    // Note: `.helper = this` is required — GemmTester::Test() dereferences
    // `helper`, and leaving it unset value-initializes it to nullptr.
    GemmTester<float>{.helper = this,
                      .a = {.type = V8MLOperandType::Enum::kFloat32,
                            .dimensions = {2, 2},
                            .values = {1.0, 2.0, 2.0, 1.0}},
                      .b = {.type = V8MLOperandType::Enum::kFloat32,
                            .dimensions = {2, 1},
                            .values = {2.0, 4.0}},
                      .expected = {10.0, 8.0}}
        .Test(scope, builder);
  }
  {
    // Test gemm operator with operand c.
    GemmTester<float>{
        .helper = this,
        .a = {.type = V8MLOperandType::Enum::kFloat32,
              .dimensions = {2, 2},
              .values = {1.0, 2.0, 2.0, 1.0}},
        .b = {.type = V8MLOperandType::Enum::kFloat32,
              .dimensions = {2, 1},
              .values = {2.0, 4.0}},
        .c = OperandInfo<float>{.type = V8MLOperandType::Enum::kFloat32,
                                .dimensions = {1},
                                .values = {1.0}},
        .expected = {11.0, 9.0}}
        .Test(scope, builder);
  }
  {
    // Test gemm operator with bTranspose = true.
    auto* options = MLGemmOptions::Create();
    options->setBTranspose(true);
    GemmTester<float>{
        .helper = this,
        .a = {.type = V8MLOperandType::Enum::kFloat32,
              .dimensions = {2, 2},
              .values = {1.0, 2.0, 2.0, 1.0}},
        .b = {.type = V8MLOperandType::Enum::kFloat32,
              .dimensions = {1, 2},
              .values = {2.0, 4.0}},
        .c = OperandInfo<float>{.type = V8MLOperandType::Enum::kFloat32,
                                .dimensions = {1},
                                .values = {1.0}},
        .expected = {11.0, 9.0}}
        .Test(scope, builder, options);
  }
}
struct HardSwishTester {
MLGraphXnnpackTest* helper;
OperandInfo<float> input;
Vector<float> expected;
void Test(V8TestingScope& scope) {
// Build the graph.
auto* builder = CreateMLGraphBuilder(scope.GetExecutionContext());
auto* input_operand = BuildInput(builder, "input", input.dimensions,
input.type, scope.GetExceptionState());
auto* output_operand =
builder->hardSwish(input_operand, scope.GetExceptionState());
auto [graph, build_exception] =
helper->BuildGraph(scope, builder, {{"output", output_operand}});
EXPECT_NE(graph, nullptr);
// Compute the graph.
MLNamedArrayBufferViews inputs(
{{"input",
CreateArrayBufferViewForOperand(input_operand, input.values)}});
MLNamedArrayBufferViews outputs(
{{"output", CreateArrayBufferViewForOperand(output_operand)}});
auto* compute_exception =
helper->ComputeGraph(scope, graph, inputs, outputs);
EXPECT_EQ(compute_exception, nullptr);
auto results = GetArrayBufferViewValues<float>(outputs[0].second);
EXPECT_EQ(results.size(), expected.size());
for (wtf_size_t i = 0; i < expected.size(); ++i) {
EXPECT_FLOAT_EQ(results[i], expected[i]);
}
}
};
TEST_P(MLGraphXnnpackTest, HardSwishTest) {
  V8TestingScope scope;
  // The expected results are the nonlinear function,
  // y = x * max(0, min(6, (x + 3))) / 6, applied to the input tensor,
  // element-wise. The 2-D, 3-D and 4-D cases below reshape the same four
  // values, so share one pair of vectors.
  const Vector<float> shared_inputs = {-1.2, -0.6, 0.6, 1.2};
  const Vector<float> shared_outputs = {-0.36, -0.24, 0.36, 0.84};
  {
    // Test hardSwish operator for 1-D tensor.
    HardSwishTester{.helper = this,
                    .input = {.type = V8MLOperandType::Enum::kFloat32,
                              .dimensions = {2},
                              .values = {-0.6, 0.6}},
                    .expected = {-0.24, 0.36}}
        .Test(scope);
  }
  {
    // Test hardSwish operator for 2-D tensor.
    HardSwishTester{.helper = this,
                    .input = {.type = V8MLOperandType::Enum::kFloat32,
                              .dimensions = {2, 2},
                              .values = shared_inputs},
                    .expected = shared_outputs}
        .Test(scope);
  }
  {
    // Test hardSwish operator for 3-D tensor.
    HardSwishTester{.helper = this,
                    .input = {.type = V8MLOperandType::Enum::kFloat32,
                              .dimensions = {1, 2, 2},
                              .values = shared_inputs},
                    .expected = shared_outputs}
        .Test(scope);
  }
  {
    // Test hardSwish operator for 4-D tensor.
    HardSwishTester{.helper = this,
                    .input = {.type = V8MLOperandType::Enum::kFloat32,
                              .dimensions = {1, 2, 2, 1},
                              .values = shared_inputs},
                    .expected = shared_outputs}
        .Test(scope);
  }
}
template <typename T>
struct Pool2dTester {
MLGraphXnnpackTest* helper;
Pool2dKind kind;
OperandInfo<T> input;
Vector<T> expected;
void Test(V8TestingScope& scope,
MLPool2dOptions* options = MLPool2dOptions::Create()) {
auto* builder = CreateMLGraphBuilder(scope.GetExecutionContext());
auto* input_operand = BuildInput(builder, "input", input.dimensions,
input.type, scope.GetExceptionState());
auto* output_operand =
BuildPool2d(scope, builder, kind, input_operand, options);
auto [graph, build_exception] =
helper->BuildGraph(scope, builder, {{"output", output_operand}});
EXPECT_NE(graph, nullptr);
MLNamedArrayBufferViews inputs(
{{"input",
CreateArrayBufferViewForOperand(input_operand, input.values)}});
MLNamedArrayBufferViews outputs(
{{"output", CreateArrayBufferViewForOperand(output_operand)}});
auto* compute_exception =
helper->ComputeGraph(scope, graph, inputs, outputs);
EXPECT_EQ(compute_exception, nullptr);
auto results = GetArrayBufferViewValues<T>(outputs[0].second);
EXPECT_EQ(results, expected);
}
};
TEST_P(MLGraphXnnpackTest, Pool2dTest) {
  V8TestingScope scope;
  {
    // Test averagePool2d operator for nhwc input layout.
    auto* options = MLPool2dOptions::Create();
    options->setLayout(V8MLInputOperandLayout::Enum::kNhwc);
    options->setWindowDimensions({3, 3});
    // Note: `.helper = this` is required — Pool2dTester::Test() dereferences
    // `helper`, and leaving it unset value-initializes it to nullptr.
    Pool2dTester<float>{
        .helper = this,
        .kind = Pool2dKind::kAverage,
        .input = {.type = V8MLOperandType::Enum::kFloat32,
                  .dimensions = {1, 4, 4, 1},
                  .values = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
                             11.0, 12.0, 13.0, 14.0, 15.0, 16.0}},
        .expected = {6.0, 7.0, 10.0, 11.0}}
        .Test(scope, options);
  }
  {
    // Test global averagePool2d operator for nhwc input layout.
    auto* options = MLPool2dOptions::Create();
    options->setLayout(V8MLInputOperandLayout::Enum::kNhwc);
    Pool2dTester<float>{
        .helper = this,
        .kind = Pool2dKind::kAverage,
        .input = {.type = V8MLOperandType::Enum::kFloat32,
                  .dimensions = {1, 4, 4, 1},
                  .values = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
                             11.0, 12.0, 13.0, 14.0, 15.0, 16.0}},
        .expected = {8.5}}
        .Test(scope, options);
  }
  {
    // Test maxPool2d operator for nhwc input layout.
    auto* options = MLPool2dOptions::Create();
    options->setLayout(V8MLInputOperandLayout::Enum::kNhwc);
    options->setWindowDimensions({3, 3});
    Pool2dTester<float>{
        .helper = this,
        .kind = Pool2dKind::kMax,
        .input = {.type = V8MLOperandType::Enum::kFloat32,
                  .dimensions = {1, 4, 4, 1},
                  .values = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
                             11.0, 12.0, 13.0, 14.0, 15.0, 16.0}},
        .expected = {11.0, 12.0, 15.0, 16.0}}
        .Test(scope, options);
  }
}
// Because XNNPACK reshape Node runs copy operator, ReshapeTester just checks
// the output against the input. So there is no need to set expected results.
template <typename T>
struct ReshapeTester {
MLGraphXnnpackTest* helper;
OperandInfo<T> input;
Vector<absl::optional<uint32_t>> new_shape;
Vector<uint32_t> expected_output_shape;
void Test(V8TestingScope& scope) {
// Build the graph.
auto* builder = CreateMLGraphBuilder(scope.GetExecutionContext());
auto* input_operand = BuildInput(builder, "input", input.dimensions,
input.type, scope.GetExceptionState());
auto* output_operand =
builder->reshape(input_operand, new_shape, scope.GetExceptionState());
EXPECT_EQ(output_operand->Dimensions(), expected_output_shape);
auto [graph, build_exception] =
helper->BuildGraph(scope, builder, {{"output", output_operand}});
EXPECT_NE(graph, nullptr);
// Compute the graph.
MLNamedArrayBufferViews inputs(
{{"input",
CreateArrayBufferViewForOperand(input_operand, input.values)}});
MLNamedArrayBufferViews outputs(
{{"output", CreateArrayBufferViewForOperand(output_operand)}});
auto* compute_exception =
helper->ComputeGraph(scope, graph, inputs, outputs);
EXPECT_EQ(compute_exception, nullptr);
auto results = GetArrayBufferViewValues<T>(outputs[0].second);
EXPECT_EQ(results, input.values);
}
};
TEST_P(MLGraphXnnpackTest, ReshapeTest) {
  V8TestingScope scope;
  // Every case below reshapes the same four values; only the shapes differ.
  const Vector<float> shared_values = {-10.0, -0.5, 0.5, 10.0};
  {
    // Test reshaping 2-D tensor to 1-D tensor.
    ReshapeTester<float>{.helper = this,
                         .input = {.type = V8MLOperandType::Enum::kFloat32,
                                   .dimensions = {2, 2},
                                   .values = shared_values},
                         .new_shape = {4},
                         .expected_output_shape = {4}}
        .Test(scope);
  }
  {
    // Test reshaping from 2-D tensor to 1-D tensor with calculated dimension.
    ReshapeTester<float>{.helper = this,
                         .input = {.type = V8MLOperandType::Enum::kFloat32,
                                   .dimensions = {2, 2},
                                   .values = shared_values},
                         .new_shape = {absl::nullopt},
                         .expected_output_shape = {4}}
        .Test(scope);
  }
  {
    // Test reshaping from 4-D tensor to 2-D tensor.
    ReshapeTester<float>{.helper = this,
                         .input = {.type = V8MLOperandType::Enum::kFloat32,
                                   .dimensions = {1, 2, 2, 1},
                                   .values = shared_values},
                         .new_shape = {1, 4},
                         .expected_output_shape = {1, 4}}
        .Test(scope);
  }
  {
    // Test reshaping from 4-D tensor to 2-D tensor with calculated dimension.
    ReshapeTester<float>{.helper = this,
                         .input = {.type = V8MLOperandType::Enum::kFloat32,
                                   .dimensions = {1, 2, 2, 1},
                                   .values = shared_values},
                         .new_shape = {1, absl::nullopt},
                         .expected_output_shape = {1, 4}}
        .Test(scope);
  }
}
// The outputs of softmax function,
// https://en.wikipedia.org/wiki/Softmax_function, are floating-point numbers
// with mantissa. The WPT WebNN conformance test cases of softmax operator,
@ -1625,12 +900,12 @@ TEST_P(MLGraphXnnpackTest, SigmoidTest) {
}
}
// Instantiate the shared end2end tests for the XNNPACK backend in both
// synchronous and asynchronous execution modes. Only one instantiation with
// the "All" prefix may exist: a second INSTANTIATE_TEST_SUITE_P(All,
// MLGraphXnnpackTest, ...) would be rejected by GoogleTest as a duplicate
// instantiation of the same suite.
INSTANTIATE_TEST_SUITE_P(
    All,
    MLGraphXnnpackTest,
    testing::Combine(::testing::Values(BackendType::kXnnpack),
                     ::testing::Values(ExecutionMode::kAsync,
                                       ExecutionMode::kSync)),
    TestVarietyToString);
} // namespace blink