diff --git a/kernels/portable/cpu/op__clone_dim_order.cpp b/kernels/portable/cpu/op__clone_dim_order.cpp
new file mode 100644
index 00000000000..83045768cf2
--- /dev/null
+++ b/kernels/portable/cpu/op__clone_dim_order.cpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+#include <executorch/kernels/portable/cpu/util/copy_ops_util.h>
+#include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
+#include <executorch/runtime/kernel/kernel_includes.h>
+
+namespace torch {
+namespace executor {
+namespace native {
+
+using Tensor = executorch::aten::Tensor;
+
+template <typename T>
+using OptionalArrayRef = executorch::aten::OptionalArrayRef<T>;
+
+/**
+ * _clone_dim_order.out(Tensor self, *, bool non_blocking=False, int[]?
+ * dim_order=None, Tensor(a!) out) -> Tensor(a!)
+ *
+ * Clones via element-wise copy while preserving dim_order.
+ */
+Tensor& _clone_dim_order_out(
+    KernelRuntimeContext& ctx,
+    const Tensor& self,
+    bool non_blocking,
+    OptionalArrayRef<int64_t> dim_order,
+    Tensor& out) {
+  (void)ctx;
+
+  // Ensure input and output dtypes match.
+  ET_KERNEL_CHECK(
+      ctx, self.scalar_type() == out.scalar_type(), InvalidArgument, out);
+
+  // Ensure output has the same layout as input or matches dim_order.
+  ET_KERNEL_CHECK(
+      ctx,
+      check__to_dim_order_copy_args(self, non_blocking, dim_order, out),
+      InvalidArgument,
+      out);
+
+  // Ensure input and output shapes match, resizing if necessary.
+  ET_KERNEL_CHECK(
+      ctx,
+      resize_tensor(out, self.sizes()) == torch::executor::Error::Ok,
+      InvalidArgument,
+      out);
+
+  if (self.numel() == 0) {
+    return out;
+  }
+
+  // Select the correct input dtype and copy the tensors.
+  ET_SWITCH_REALHBBF16_TYPES(
+      self.scalar_type(),
+      ctx,
+      "dim_order_ops::_clone_dim_order.out",
+      CTYPE,
+      [&] { _to_dim_order_copy_impl<CTYPE, CTYPE>(self, out); });
+
+  return out;
+}
+
+Tensor& _clone_dim_order_out(
+    const Tensor& self,
+    bool non_blocking,
+    OptionalArrayRef<int64_t> dim_order,
+    Tensor& out) {
+  executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context{};
+  return _clone_dim_order_out(context, self, non_blocking, dim_order, out);
+}
+
+} // namespace native
+} // namespace executor
+} // namespace torch
\ No newline at end of file
diff --git a/kernels/portable/cpu/op__to_dim_order_copy.cpp b/kernels/portable/cpu/op__to_dim_order_copy.cpp
index fb47ff7b6ef..b6e35f90cdb 100644
--- a/kernels/portable/cpu/op__to_dim_order_copy.cpp
+++ b/kernels/portable/cpu/op__to_dim_order_copy.cpp
@@ -29,29 +29,6 @@ using OptionalArrayRef = executorch::aten::OptionalArrayRef<T>;
 template <typename T>
 using Optional = std::optional<T>;
 
-namespace {
-
-template <typename SELF_CTYPE, typename OUT_CTYPE>
-void _to_dim_order_copy_impl(const Tensor& self, Tensor& out) {
-  auto self_data = self.mutable_data_ptr<SELF_CTYPE>();
-  auto out_data = out.mutable_data_ptr<OUT_CTYPE>();
-
-  // Here we make a slightly off-label use of
-  // BroadcastIndexesRange. It always assumes it doesn't have to care
-  // about different dim_order between input and output, but we can
-  // just force it to respect strides (and thus dim_order) for its
-  // inputs using support_noncontiguous_input_tensors=true, and then pretend
-  // the output is just another input.
-  for (const auto [unused_index, self_data_index, out_data_index] :
-       BroadcastIndexesRange<2, /*support_noncontiguous_input_tensors=*/true>(
-           /*dummy output*/ self, self, out)) {
-    (void)unused_index;
-    out_data[out_data_index] =
-        static_cast<OUT_CTYPE>(self_data[self_data_index]);
-  }
-}
-} // namespace
-
 // _to_dim_order_copy.out(Tensor self, *, bool non_blocking=False, int[]?
 // dim_order=None, Tensor(a!) out) -> Tensor(a!)
 Tensor& _to_dim_order_copy_out(
diff --git a/kernels/portable/cpu/util/copy_ops_util.h b/kernels/portable/cpu/util/copy_ops_util.h
index e7cd6f6790c..15a7916e0e8 100644
--- a/kernels/portable/cpu/util/copy_ops_util.h
+++ b/kernels/portable/cpu/util/copy_ops_util.h
@@ -9,6 +9,7 @@
 #pragma once
 
 #include
+#include <executorch/kernels/portable/cpu/util/broadcast_indexes_range.h>
 #include <executorch/runtime/kernel/kernel_includes.h>
 
 namespace torch {
 namespace executor {
@@ -77,6 +78,29 @@ void as_strided_copy(
   }
 }
 
+/**
+ * Copies and casts a tensor while preserving the input dim_order.
+ */
+template <typename SELF_CTYPE, typename OUT_CTYPE>
+void _to_dim_order_copy_impl(const Tensor& self, Tensor& out) {
+  auto self_data = self.mutable_data_ptr<SELF_CTYPE>();
+  auto out_data = out.mutable_data_ptr<OUT_CTYPE>();
+
+  // Here we make a slightly off-label use of
+  // BroadcastIndexesRange. It always assumes it doesn't have to care
+  // about different dim_order between input and output, but we can
+  // just force it to respect strides (and thus dim_order) for its
+  // inputs using support_noncontiguous_input_tensors=true, and then pretend
+  // the output is just another input.
+  for (const auto [unused_index, self_data_index, out_data_index] :
+       BroadcastIndexesRange<2, /*support_noncontiguous_input_tensors=*/true>(
+           /*dummy output*/ self, self, out)) {
+    (void)unused_index;
+    out_data[out_data_index] =
+        static_cast<OUT_CTYPE>(self_data[self_data_index]);
+  }
+}
+
 bool check_cat_args(
     executorch::aten::ArrayRef<Tensor> tensors,
     int64_t dim,
diff --git a/kernels/portable/cpu/util/targets.bzl b/kernels/portable/cpu/util/targets.bzl
index 1806ebb0d5a..8194b37f319 100644
--- a/kernels/portable/cpu/util/targets.bzl
+++ b/kernels/portable/cpu/util/targets.bzl
@@ -147,6 +147,9 @@ def define_common_targets():
             "copy_ops_util.h",
         ],
         compiler_flags = ["-Wno-missing-prototypes"],
+        exported_deps = [
+            ":broadcast_util",
+        ],
         deps = [
             "//executorch/runtime/kernel:kernel_includes",
         ],
@@ -348,7 +351,6 @@ def define_common_targets():
         ],
     )
 
-
     runtime.cxx_library(
         name = "arange_util{}".format(suffix),
         srcs = ["arange_util.cpp"],
diff --git a/kernels/portable/functions.yaml b/kernels/portable/functions.yaml
index feaee415f91..cb04241096f 100644
--- a/kernels/portable/functions.yaml
+++ b/kernels/portable/functions.yaml
@@ -1009,3 +1009,8 @@
   kernels:
     - arg_meta: null
      kernel_name: torch::executor::_to_dim_order_copy_out
+
+- func: dim_order_ops::_clone_dim_order.out(Tensor self, *, bool non_blocking=False, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)
+  kernels:
+    - arg_meta: null
+      kernel_name: torch::executor::_clone_dim_order_out
\ No newline at end of file
diff --git a/kernels/test/CMakeLists.txt b/kernels/test/CMakeLists.txt
index f5997a1ee3f..f4e8d0ee311 100644
--- a/kernels/test/CMakeLists.txt
+++ b/kernels/test/CMakeLists.txt
@@ -108,6 +108,7 @@ add_custom_target(
 set(all_test_sources
   "BinaryLogicalOpTest.cpp"
   "op__to_dim_order_copy_test.cpp"
+  "op__clone_dim_order_test.cpp"
   "op_abs_test.cpp"
   "op_acos_test.cpp"
   "op_acosh_test.cpp"
diff --git a/kernels/test/op__clone_dim_order_test.cpp b/kernels/test/op__clone_dim_order_test.cpp
new file mode 100644
index 00000000000..d999897cdf3
--- /dev/null
+++ b/kernels/test/op__clone_dim_order_test.cpp
@@ -0,0 +1,365 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+#include <algorithm>
+#include <cstdint>
+#include <optional>
+#include <vector>
+
+#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator.
+#include <executorch/kernels/test/TestUtil.h>
+#include <executorch/kernels/test/supported_features.h>
+#include <executorch/runtime/core/exec_aten/exec_aten.h>
+#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
+#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
+
+#include <gtest/gtest.h>
+
+using namespace ::testing;
+using executorch::aten::ArrayRef;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
+using std::optional;
+using torch::executor::testing::TensorFactory;
+
+class OpDimOrderCloneTest : public OperatorTest {
+ protected:
+  Tensor& op__clone_dim_order_out(
+      const Tensor& self,
+      bool non_blocking,
+      std::optional<ArrayRef<int64_t>> dim_order,
+      Tensor& out) {
+    return torch::executor::dim_order_ops::_clone_dim_order_outf(
+        context_, self, non_blocking, dim_order, out);
+  }
+
+  template <typename INPUT_CTYPE, typename OUTPUT_CTYPE>
+  std::vector<OUTPUT_CTYPE> vector_type_cast(std::vector<INPUT_CTYPE> input) {
+    std::vector<OUTPUT_CTYPE> output(input.size());
+    std::transform(
+        input.begin(), input.end(), output.begin(), [](INPUT_CTYPE x) {
+          return static_cast<OUTPUT_CTYPE>(x);
+        });
+    return output;
+  }
+
+  template <typename INPUT_CTYPE, typename OUTPUT_CTYPE>
+  struct ToTestCase {
+    const std::vector<int32_t> sizes;
+    const std::vector<INPUT_CTYPE> data_in;
+    const std::vector<OUTPUT_CTYPE> data_out;
+  };
+
+  template <typename CTYPE, ScalarType DTYPE>
+  void test_runner_clone(std::vector<ToTestCase<double, double>> test_cases) {
+    TensorFactory<DTYPE> tf_in;
+    TensorFactory<DTYPE> tf_out;
+
+    for (const auto& test_case : test_cases) {
+      auto data_in = vector_type_cast<double, CTYPE>(test_case.data_in);
+
+      Tensor input = tf_in.make(test_case.sizes, data_in);
+      Tensor output = tf_out.zeros_like(input);
+
+      std::vector<int64_t> dim_order_vec;
+      for (int64_t i = 0; i < input.dim(); i++) {
+        dim_order_vec.push_back(i);
+      }
+      ArrayRef<int64_t> dim_order(dim_order_vec.data(), dim_order_vec.size());
+
+      Tensor ret = op__clone_dim_order_out(
+          /*self=*/input,
+          /*non_blocking=*/false,
+          dim_order,
+          output);
+
+      Tensor expected = tf_out.make(test_case.sizes, data_in);
+
+      // Verify that the returned tensor and the out tensor from
+      // _clone_dim_order both match the original input (expected).
+      EXPECT_TENSOR_EQ(ret, output);
+      EXPECT_TENSOR_EQ(ret, expected);
+    }
+  }
+
+  // Helper for testing dynamic shape outputs.
+  void test_dynamic_shape(
+      const std::vector<int32_t>& out_shape,
+      enum torch::executor::TensorShapeDynamism dynamism) {
+    TensorFactory<ScalarType::Float> tf;
+
+    Tensor x = tf.make(
+        {2, 3},
+        {0.49625658988952637,
+         0.7682217955589294,
+         0.08847743272781372,
+         0.13203048706054688,
+         0.30742281675338745,
+         0.6340786814689636});
+    Tensor expected = tf.make(
+        {2, 3},
+        {0.49625658988952637,
+         0.7682217955589294,
+         0.08847743272781372,
+         0.13203048706054688,
+         0.30742281675338745,
+         0.6340786814689636});
+
+    bool non_blocking = false;
+
+    Tensor out = tf.zeros(out_shape, dynamism);
+
+    std::vector<int64_t> dim_order_vec;
+    for (int64_t i = 0; i < x.dim(); i++) {
+      dim_order_vec.push_back(i);
+    }
+    ArrayRef<int64_t> dim_order(dim_order_vec.data(), dim_order_vec.size());
+
+    Tensor ret = op__clone_dim_order_out(
+        /*self=*/x, non_blocking, dim_order, out);
+
+    EXPECT_TENSOR_EQ(out, expected);
+    EXPECT_TENSOR_EQ(ret, expected);
+  }
+};
+
+// Clones tensors of all real dtypes.
+TEST_F(OpDimOrderCloneTest, AllDtypesSupported) {
+  std::vector<ToTestCase<double, double>> test_cases = {
+      {
+          /*sizes=*/{2, 4},
+          /*data_in=*/{2.11, 3.2, 2.3, 4.0, 1.1, 5.2, 1.1, 6.3},
+          /*data_out=*/{}, // data_out shouldn't be used in test_runner_clone.
+      },
+      {
+          /*sizes=*/{3, 4, 0, 5},
+          /*data_in=*/{},
+          /*data_out=*/{},
+      },
+      {
+          /*sizes=*/{},
+          /*data_in=*/{10.0},
+          /*data_out=*/{}, // data_out shouldn't be used in test_runner_clone.
+      },
+  };
+
+#define TEST_KERNEL(CTYPE, DTYPE) \
+  test_runner_clone<CTYPE, ScalarType::DTYPE>(test_cases);
+
+  ET_FORALL_REAL_TYPES(TEST_KERNEL);
+
+#undef TEST_KERNEL
+}
+
+// Cloning with mismatched input and output tensor shapes should fail.
+TEST_F(OpDimOrderCloneTest, MismatchedSizesDie) {
+  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
+    GTEST_SKIP() << "Skipping: ATen kernel supports mismatched sizes.";
+  }
+  TensorFactory<ScalarType::Int> tf;
+  Tensor input = tf.make(/*sizes=*/{3, 1, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
+  Tensor out = tf.zeros({3, 2, 1, 1});
+  std::vector<int64_t> dim_order_vec;
+  for (int64_t i = 0; i < input.dim(); i++) {
+    dim_order_vec.push_back(i);
+  }
+  ArrayRef<int64_t> dim_order(dim_order_vec.data(), dim_order_vec.size());
+
+  ET_EXPECT_KERNEL_FAILURE(
+      context_,
+      op__clone_dim_order_out(
+          /*self=*/input,
+          /*non_blocking=*/false,
+          dim_order,
+          out));
+}
+
+// Cloning with an unsupported memory format should fail.
+TEST_F(OpDimOrderCloneTest, MismatchedMemoryFormatDies) {
+  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
+    GTEST_SKIP()
+        << "Skipping: ATen kernel supports non-contiguous memory formats.";
+  }
+  TensorFactory<ScalarType::Float> tf_in;
+  TensorFactory<ScalarType::Float> tf_out;
+  Tensor input =
+      tf_in.make(/*sizes=*/{3, 1, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
+  Tensor out = tf_out.zeros({3, 1, 1, 2});
+
+  std::vector<int64_t> dim_order_vec;
+  for (int64_t i = 0; i < input.dim(); i++) {
+    dim_order_vec.push_back(i);
+  }
+
+  // Mutate dim_order_vec to create an illegal dim_order.
+  dim_order_vec[1] = 3;
+  dim_order_vec[3] = 1;
+  ArrayRef<int64_t> dim_order(dim_order_vec.data(), dim_order_vec.size());
+
+  ET_EXPECT_KERNEL_FAILURE(
+      context_,
+      op__clone_dim_order_out(
+          /*self=*/input,
+          /*non_blocking=*/false,
+          dim_order,
+          out));
+}
+
+// Cloning with non_blocking=true should fail because portable kernels only
+// support blocking data transfer.
+TEST_F(OpDimOrderCloneTest, MismatchedBlockingDie) {
+  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
+    GTEST_SKIP()
+        << "Skipping: ATen kernel supports non-blocking data transfer.";
+  }
+  TensorFactory<ScalarType::Int> tf;
+  Tensor input = tf.make(/*sizes=*/{3, 1, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
+  Tensor out = tf.zeros(/*sizes=*/{3, 1, 1, 2});
+
+  std::vector<int64_t> dim_order_vec;
+  for (int64_t i = 0; i < input.dim(); i++) {
+    dim_order_vec.push_back(i);
+  }
+  ArrayRef<int64_t> dim_order(dim_order_vec.data(), dim_order_vec.size());
+
+  ET_EXPECT_KERNEL_FAILURE(
+      context_,
+      op__clone_dim_order_out(
+          /*self=*/input,
+          /*non_blocking=*/true,
+          dim_order,
+          out));
+}
+
+TEST_F(OpDimOrderCloneTest, DynamicShapeUpperBoundSameAsExpected) {
+  test_dynamic_shape(
+      {2, 3}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
+}
+
+TEST_F(OpDimOrderCloneTest, DynamicShapeUpperBoundLargerThanExpected) {
+  test_dynamic_shape(
+      {10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
+}
+
+TEST_F(OpDimOrderCloneTest, DynamicShapeUnbound) {
+  if (!torch::executor::testing::SupportedFeatures::get()->output_resize) {
+    GTEST_SKIP() << "Skipping: Dynamic shape unbound not supported.";
+  }
+  test_dynamic_shape(
+      {1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
+}
+
+TEST_F(OpDimOrderCloneTest, ContiguousToChannelsLast) {
+  TensorFactory<ScalarType::Float> tf;
+
+  // x is in contiguous dim order {0, 1, 2, 3}.
+  // make_with_dimorder() defaults to contiguous when dim_order isn't specified.
+  Tensor x = tf.make_with_dimorder(
+      {3, 5, 2, 2},
+      {0.2432, 0.5248, 0.5361, 0.8513, 0.8184, 0.8206, 0.7357, 0.9655, 0.6138,
+       0.1112, 0.2799, 0.1079, 0.9680, 0.2548, 0.0393, 0.6002, 0.2257, 0.8766,
+       0.2715, 0.1595, 0.2029, 0.7026, 0.6982, 0.8529, 0.4405, 0.6560, 0.9217,
+       0.6372, 0.2446, 0.6590, 0.3866, 0.7185, 0.4439, 0.5346, 0.3179, 0.4492,
+       0.3491, 0.6970, 0.8456, 0.2516, 0.2345, 0.2924, 0.7695, 0.0911, 0.8530,
+       0.8560, 0.6909, 0.7719, 0.8923, 0.5546, 0.6978, 0.8151, 0.3007, 0.3961,
+       0.8416, 0.4296, 0.7203, 0.8963, 0.3597, 0.5552});
+
+  Tensor out = tf.full_channels_last({3, 5, 2, 2}, 0.0);
+  Tensor expected = tf.make_with_dimorder(
+      {3, 5, 2, 2},
+      {0.2432, 0.8184, 0.6138, 0.9680, 0.2257, 0.5248, 0.8206, 0.1112, 0.2548,
+       0.8766, 0.5361, 0.7357, 0.2799, 0.0393, 0.2715, 0.8513, 0.9655, 0.1079,
+       0.6002, 0.1595, 0.2029, 0.4405, 0.2446, 0.4439, 0.3491, 0.7026, 0.6560,
+       0.6590, 0.5346, 0.6970, 0.6982, 0.9217, 0.3866, 0.3179, 0.8456, 0.8529,
+       0.6372, 0.7185, 0.4492, 0.2516, 0.2345, 0.8530, 0.8923, 0.3007, 0.7203,
+       0.2924, 0.8560, 0.5546, 0.3961, 0.8963, 0.7695, 0.6909, 0.6978, 0.8416,
+       0.3597, 0.0911, 0.7719, 0.8151, 0.4296, 0.5552},
+      /*dim_order=*/{0, 2, 3, 1});
+
+  std::vector<int64_t> dim_order_vec = {0, 2, 3, 1};
+  executorch::aten::ArrayRef<int64_t> dim_order(
+      dim_order_vec.data(), dim_order_vec.size());
+  Tensor ret = op__clone_dim_order_out(
+      /*self*/ x, /*non_blocking*/ false, /*dim_order*/ dim_order, out);
+
+  EXPECT_TENSOR_EQ(out, expected);
+  EXPECT_TENSOR_EQ(ret, expected);
+}
+
+TEST_F(OpDimOrderCloneTest, ChannelsLastToContiguous) {
+  TensorFactory<ScalarType::Float> tf;
+
+  Tensor out = tf.full({3, 5, 2, 2}, 0.0);
+
+  // x is in channels_last dim order {0, 2, 3, 1}.
+  Tensor x = tf.make_with_dimorder(
+      {3, 5, 2, 2},
+      {0.2432, 0.8184, 0.6138, 0.9680, 0.2257, 0.5248, 0.8206, 0.1112, 0.2548,
+       0.8766, 0.5361, 0.7357, 0.2799, 0.0393, 0.2715, 0.8513, 0.9655, 0.1079,
+       0.6002, 0.1595, 0.2029, 0.4405, 0.2446, 0.4439, 0.3491, 0.7026, 0.6560,
+       0.6590, 0.5346, 0.6970, 0.6982, 0.9217, 0.3866, 0.3179, 0.8456, 0.8529,
+       0.6372, 0.7185, 0.4492, 0.2516, 0.2345, 0.8530, 0.8923, 0.3007, 0.7203,
+       0.2924, 0.8560, 0.5546, 0.3961, 0.8963, 0.7695, 0.6909, 0.6978, 0.8416,
+       0.3597, 0.0911, 0.7719, 0.8151, 0.4296, 0.5552},
+      /*dim_order=*/{0, 2, 3, 1});
+
+  Tensor expected = tf.make_with_dimorder(
+      {3, 5, 2, 2},
+      {0.2432, 0.5248, 0.5361, 0.8513, 0.8184, 0.8206, 0.7357, 0.9655, 0.6138,
+       0.1112, 0.2799, 0.1079, 0.9680, 0.2548, 0.0393, 0.6002, 0.2257, 0.8766,
+       0.2715, 0.1595, 0.2029, 0.7026, 0.6982, 0.8529, 0.4405, 0.6560, 0.9217,
+       0.6372, 0.2446, 0.6590, 0.3866, 0.7185, 0.4439, 0.5346, 0.3179, 0.4492,
+       0.3491, 0.6970, 0.8456, 0.2516, 0.2345, 0.2924, 0.7695, 0.0911, 0.8530,
+       0.8560, 0.6909, 0.7719, 0.8923, 0.5546, 0.6978, 0.8151, 0.3007, 0.3961,
+       0.8416, 0.4296, 0.7203, 0.8963, 0.3597, 0.5552});
+
+  std::vector<int64_t> dim_order_vec = {0, 1, 2, 3};
+  executorch::aten::ArrayRef<int64_t> dim_order(
+      dim_order_vec.data(), dim_order_vec.size());
+  Tensor ret = op__clone_dim_order_out(
+      /*self*/ x, /*non_blocking*/ false, /*dim_order*/ dim_order, out);
+
+  EXPECT_TENSOR_EQ(out, expected);
+  EXPECT_TENSOR_EQ(ret, expected);
+}
+
+TEST_F(OpDimOrderCloneTest, PreserveChannelsLast) {
+  TensorFactory<ScalarType::Float> tf;
+
+  Tensor out = tf.full_channels_last({3, 5, 2, 2}, 0.0);
+  Tensor x = tf.make_with_dimorder(
+      {3, 5, 2, 2},
+      {0.2432, 0.8184, 0.6138, 0.9680, 0.2257, 0.5248, 0.8206, 0.1112, 0.2548,
+       0.8766, 0.5361, 0.7357, 0.2799, 0.0393, 0.2715, 0.8513, 0.9655, 0.1079,
+       0.6002, 0.1595, 0.2029, 0.4405, 0.2446, 0.4439, 0.3491, 0.7026, 0.6560,
+       0.6590, 0.5346, 0.6970, 0.6982, 0.9217, 0.3866, 0.3179, 0.8456, 0.8529,
+       0.6372, 0.7185, 0.4492, 0.2516, 0.2345, 0.8530, 0.8923, 0.3007, 0.7203,
+       0.2924, 0.8560, 0.5546, 0.3961, 0.8963, 0.7695, 0.6909, 0.6978, 0.8416,
+       0.3597, 0.0911, 0.7719, 0.8151, 0.4296, 0.5552},
+      /*dim_order=*/{0, 2, 3, 1});
+
+  Tensor expected = tf.make_with_dimorder(
+      {3, 5, 2, 2},
+      {0.2432, 0.8184, 0.6138, 0.9680, 0.2257, 0.5248, 0.8206, 0.1112, 0.2548,
+       0.8766, 0.5361, 0.7357, 0.2799, 0.0393, 0.2715, 0.8513, 0.9655, 0.1079,
+       0.6002, 0.1595, 0.2029, 0.4405, 0.2446, 0.4439, 0.3491, 0.7026, 0.6560,
+       0.6590, 0.5346, 0.6970, 0.6982, 0.9217, 0.3866, 0.3179, 0.8456, 0.8529,
+       0.6372, 0.7185, 0.4492, 0.2516, 0.2345, 0.8530, 0.8923, 0.3007, 0.7203,
+       0.2924, 0.8560, 0.5546, 0.3961, 0.8963, 0.7695, 0.6909, 0.6978, 0.8416,
+       0.3597, 0.0911, 0.7719, 0.8151, 0.4296, 0.5552},
+      /*dim_order=*/{0, 2, 3, 1});
+
+  Tensor ret = op__clone_dim_order_out(
+      /*self*/ x,
+      /*non_blocking*/ false,
+      /*dim_order*/ executorch::aten::nullopt,
+      out);
+
+  EXPECT_TENSOR_EQ(out, expected);
+  EXPECT_TENSOR_EQ(ret, expected);
+}
diff --git a/kernels/test/targets.bzl b/kernels/test/targets.bzl
index 60dabac1844..8ab55c170fd 100644
--- a/kernels/test/targets.bzl
+++ b/kernels/test/targets.bzl
@@ -177,6 +177,7 @@ def define_common_targets():
 
     _common_op_test("op__to_dim_order_copy_test", ["aten", "portable"])
     _common_op_test("op__empty_dim_order_test", ["aten", "portable"])
+    _common_op_test("op__clone_dim_order_test", ["portable"])
    _common_op_test("op_abs_test", ["aten", "portable"])
    _common_op_test("op_acos_test", ["aten", "portable"])
    _common_op_test("op_acosh_test", ["aten", "portable"])
diff --git a/shim_et/xplat/executorch/kernels/portable/op_registration_util.bzl b/shim_et/xplat/executorch/kernels/portable/op_registration_util.bzl
index 73dfafdc65d..3df05b3651a 100644
--- a/shim_et/xplat/executorch/kernels/portable/op_registration_util.bzl
+++ b/shim_et/xplat/executorch/kernels/portable/op_registration_util.bzl
@@ -1329,6 +1329,13 @@ ATEN_OPS = (
             "//executorch/kernels/portable/cpu/util:copy_ops_util",
         ],
     ),
+    op_target(
+        name = "op__clone_dim_order",
+        deps = [
+            ":scalar_utils",
+            "//executorch/kernels/portable/cpu/util:copy_ops_util",
+        ],
+    ),
 )
 
 # Operators that are not listed in `functions.yaml` (i.e., operators listed in