webnn: CoreML support int8/uint8 for immediate constant values

Add int8/uint8 support for immediate values too. These are currently
used for scalar constants and int32 constants.

Bug: 333952702
Change-Id: I203a226dd6add99f5622790db3ea2a273b8b9479
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/5958133
Reviewed-by: Austin Sullivan <asully@chromium.org>
Commit-Queue: Phillis Tang <phillis@chromium.org>
Cr-Commit-Position: refs/heads/main@{#1373537}
Author: Phillis Tang
Date: 2024-10-24 20:15:06 +00:00
Committed by: Chromium LUCI CQ
Parent: 5e3543e5f2
Commit: 3f32181616
3 changed files with 126 additions and 20 deletions, under:
  services/webnn/coreml
  third_party/blink/web_tests/external/wpt/webnn/conformance_tests
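
What the change enables, seen from the WebNN JavaScript API: a graph can bake
an int8 or uint8 tensor in as a constant, and the CoreML backend can now
serialize it inline as an immediate value (the removed CHECK below shows this
path previously only accepted the float and int32 MIL types). The sketch below
is illustrative only and not part of the commit; it assumes a default
navigator.ml.createContext() context, and the compute() readback shape varies
by context type.

// Illustrative sketch: an int8 constant baked into the graph, then cast.
const context = await navigator.ml.createContext();
const builder = new MLGraphBuilder(context);

// int8 constant data supplied at build time; on CoreML this can now be
// emitted as an immediate `const` operation in the generated MIL program.
const castInput = builder.constant(
    {dataType: 'int8', shape: [4]}, new Int8Array([123, 17, 31, 77]));
const castOutput = builder.cast(castInput, 'int32');
const graph = await builder.build({castOutput});

// Readback (assumed older compute() entry point; adjust for the context).
const results = await context.compute(
    graph, {}, {castOutput: new Int32Array(4)});
console.log(results.outputs.castOutput);  // Int32Array [123, 17, 31, 77]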

@@ -405,6 +405,16 @@ struct MilDataTypeMap<int32_t> {
      CoreML::Specification::MILSpec::DataType::INT32;
};
template <>
struct MilDataTypeMap<int8_t> {
  static constexpr CoreML::Specification::MILSpec::DataType value =
      CoreML::Specification::MILSpec::DataType::INT8;
};
template <>
struct MilDataTypeMap<uint8_t> {
  static constexpr CoreML::Specification::MILSpec::DataType value =
      CoreML::Specification::MILSpec::DataType::UINT8;
};
template <>
struct MilDataTypeMap<Float16> {
  static constexpr CoreML::Specification::MILSpec::DataType value =
      CoreML::Specification::MILSpec::DataType::FLOAT16;
@@ -441,6 +451,20 @@ void SetTensorValueForImmediateValue<Float16>(
  tensor.mutable_bytes()->mutable_values()->assign(
      base::as_string_view(base::as_bytes(value)));
}
template <>
void SetTensorValueForImmediateValue<int8_t>(
    CoreML::Specification::MILSpec::TensorValue& tensor,
    base::span<const int8_t> value) {
  tensor.mutable_bytes()->mutable_values()->assign(
      base::as_string_view(base::as_bytes(value)));
}
template <>
void SetTensorValueForImmediateValue<uint8_t>(
    CoreML::Specification::MILSpec::TensorValue& tensor,
    base::span<const uint8_t> value) {
  tensor.mutable_bytes()->mutable_values()->assign(base::as_string_view(value));
}
template <>
void SetTensorValueForImmediateValue<float>(
    CoreML::Specification::MILSpec::TensorValue& tensor,
@@ -3207,7 +3231,8 @@ void GraphBuilderCoreml::AddConstantImmediateValue(
    uint64_t constant_id,
    CoreML::Specification::MILSpec::Block& block) {
  auto* op = block.add_operations();
  PopulateConstantOpFromOperand(constant_id, *op);
  op->set_type(kOpConstTypeName);
  PopulateNamedValueType(constant_id, *op->add_outputs());
  google::protobuf::Map<std::string, ::CoreML::Specification::MILSpec::Value>&
      attributes = *op->mutable_attributes();
@@ -3248,11 +3273,29 @@ void GraphBuilderCoreml::AddConstantImmediateValue(
          constant_operand->descriptor().shape(), ints);
      break;
    }
    case OperandDataType::kInt8: {
      base::FixedArray<int8_t> int8s(value.size() / sizeof(int8_t));
      for (size_t i = 0u; i < int8s.size(); ++i) {
        int8s[i] = base::I8FromNativeEndian(
            value.subspan(i * sizeof(int8_t)).first<1u>());
      }
      attributes["val"] = CreateTensorImmediateValue<int8_t>(
          constant_operand->descriptor().shape(), int8s);
      break;
    }
    case OperandDataType::kUint8: {
      base::FixedArray<uint8_t> uint8s(value.size() / sizeof(uint8_t));
      for (size_t i = 0u; i < uint8s.size(); ++i) {
        uint8s[i] = base::U8FromNativeEndian(
            value.subspan(i * sizeof(uint8_t)).first<1u>());
      }
      attributes["val"] = CreateTensorImmediateValue<uint8_t>(
          constant_operand->descriptor().shape(), uint8s);
      break;
    }
    case OperandDataType::kUint32:
    case OperandDataType::kInt64:
    case OperandDataType::kUint64:
    case OperandDataType::kInt8:
    case OperandDataType::kUint8:
    case OperandDataType::kInt4:
    case OperandDataType::kUint4: {
      NOTREACHED() << "Unsupported data type.";
@@ -3283,17 +3326,6 @@ GraphBuilderCoreml::GetOperandInfo(uint64_t operand_id) const {
  return result_->GetOperandInfo(operand_id);
}
void GraphBuilderCoreml::PopulateConstantOpFromOperand(
    uint64_t constant_id,
    CoreML::Specification::MILSpec::Operation& op) {
  CoreML::Specification::MILSpec::DataType mil_data_type =
      GetOperandInfo(constant_id).mil_data_type;
  CHECK(kFloatsAndInt32DataTypes.contains(mil_data_type));
  op.set_type(kOpConstTypeName);
  PopulateNamedValueType(constant_id, *op.add_outputs());
}
base::expected<void, mojom::ErrorPtr>
GraphBuilderCoreml::PopulateFeatureDescription(
    uint64_t operand_id,

@@ -40,7 +40,7 @@ template <typename T, typename... U>
concept IsAnyOf = (std::same_as<T, U> || ...);
template <typename T>
concept IsSupportedTensorType =
    IsAnyOf<T, Float16, float, int32_t, int8_t, char, bool>;
    IsAnyOf<T, Float16, float, int32_t, int8_t, uint8_t, char, bool>;
}  // namespace internal
inline constexpr char kPlaceholderInputName[] = "placeholder";
@@ -327,11 +327,6 @@ class GraphBuilderCoreml {
      uint64_t constant_id,
      uint64_t offset);
  // Populate generic fields that apply to all `const` operations.
  void PopulateConstantOpFromOperand(
      uint64_t constant_id,
      CoreML::Specification::MILSpec::Operation& op);
  // Helpers.
  const mojom::Operand& GetOperand(uint64_t operand_id) const;
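
The web-platform-test additions below feed constant tensors through cast. In
the WPT harness, an operand marked 'constant': true is materialized with
MLGraphBuilder.constant() instead of being declared as a named graph input,
which is how its data can end up on the immediate-value path changed above
(per the commit message, currently scalar and int32 constants). A rough sketch
of that mapping, with hypothetical helper names (the harness's real plumbing
is shared WPT utility code, not shown in this diff):

// Hypothetical sketch of turning one castTests entry into builder calls.
function buildCastGraphFromCase(builder, testCase) {
  const spec = testCase.graph.inputs.castInput;
  // toTypedArray() is an assumed helper mapping dataType to a TypedArray.
  const data = toTypedArray(spec.descriptor.dataType, spec.data);
  // 'constant': true bakes the tensor into the graph at build time;
  // otherwise it becomes a runtime input bound at execution time.
  const castInput = spec.constant ?
      builder.constant(spec.descriptor, data) :
      builder.input('castInput', spec.descriptor);
  const targetType = testCase.graph.operators[0].arguments[1].type;
  return {castOutput: builder.cast(castInput, targetType)};
}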

@@ -623,6 +623,35 @@ const castTests = [
      }
    }
  },
  {
    'name': 'cast int32 4D constant tensor to float32',
    'graph': {
      'inputs': {
        'castInput': {
          'data': [
            45, 55, 11, 21, 78, 104, 102, 66, 41, 110, 92, 69,
            48, 23, 58, 12, 33, 24, 101, 87, 49, 118, 1, 77
          ],
          'descriptor': {shape: [2, 2, 2, 3], dataType: 'int32'},
          'constant': true,
        }
      },
      'operators': [{
        'name': 'cast',
        'arguments': [{'input': 'castInput'}, {'type': 'float32'}],
        'outputs': 'castOutput'
      }],
      'expectedOutputs': {
        'castOutput': {
          'data': [
            45, 55, 11, 21, 78, 104, 102, 66, 41, 110, 92, 69,
            48, 23, 58, 12, 33, 24, 101, 87, 49, 118, 1, 77
          ],
          'descriptor': {shape: [2, 2, 2, 3], dataType: 'float32'}
        }
      }
    }
  },
  {
    'name': 'cast int32 4D tensor to float16',
    'graph': {
@@ -1073,6 +1102,56 @@ const castTests = [
      }
    }
  },
  {
    'name': 'cast int8 0D constant tensor to int32',
    'graph': {
      'inputs': {
        'castInput': {
          'data': [17],
          'descriptor': {shape: [], dataType: 'int8'},
          'constant': true
        }
      },
      'operators': [{
        'name': 'cast',
        'arguments': [{'input': 'castInput'}, {'type': 'int32'}],
        'outputs': 'castOutput'
      }],
      'expectedOutputs': {
        'castOutput':
            {'data': [17], 'descriptor': {shape: [], dataType: 'int32'}}
      }
    }
  },
  {
    'name': 'cast int8 1D constant tensor to int32',
    'graph': {
      'inputs': {
        'castInput': {
          'data': [
            123, 17, 31, 77, 88, 44, 84, 40, 14, 64, 109, 4,
            2, 0, 45, 47, 72, 88, 82, 4, 73, 36, 65, 117
          ],
          'descriptor': {shape: [24], dataType: 'int8'},
          'constant': true
        }
      },
      'operators': [{
        'name': 'cast',
        'arguments': [{'input': 'castInput'}, {'type': 'int32'}],
        'outputs': 'castOutput'
      }],
      'expectedOutputs': {
        'castOutput': {
          'data': [
            123, 17, 31, 77, 88, 44, 84, 40, 14, 64, 109, 4,
            2, 0, 45, 47, 72, 88, 82, 4, 73, 36, 65, 117
          ],
          'descriptor': {shape: [24], dataType: 'int32'}
        }
      }
    }
  },
  {
    'name': 'cast int8 4D tensor to float32',
    'graph': {