#include <limits>
#include <vector>

#include "cpp_test_util.h"
#include "torch_xla/csrc/tensor_util.h"

namespace torch_xla {
namespace cpp_test {

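// Round-trips a random uint8 tensor through the backend: upload it as device
// data, read it back as an at::Tensor, and check the values survive intact.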
TEST(XLABackendTest, TestTensorTransfer) {
  torch::lazy::BackendImplInterface* impl = GetXlaBackendImpl();
  at::Tensor input = at::randint(std::numeric_limits<uint8_t>::min(),
                                 std::numeric_limits<uint8_t>::max(), {2, 2},
                                 at::TensorOptions(at::kByte));
  ForEachDevice([&](const torch::lazy::BackendDevice& device) {
    torch::lazy::BackendDataPtr data = impl->MakeComputationDataFromTensor(
        input, torch::lazy::Shape(input.scalar_type(), input.sizes()), device);
    at::Tensor res = impl->MakeTensorFromComputationData(data, at::kByte);
    AllClose(input, res);
  });
}

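// Uploads a scalar as device data and checks that it comes back as the
// expected zero-dimensional tensor.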
TEST(XLABackendTest, TestScalarTransfer) {
  torch::lazy::BackendImplInterface* impl = GetXlaBackendImpl();
  at::Scalar input = at::Scalar(int64_t(1));
  ForEachDevice([&](const torch::lazy::BackendDevice& device) {
    torch::lazy::BackendDataPtr data =
        impl->MakeComputationDataFromScalar(input, device);
    at::Tensor res = impl->MakeTensorFromComputationData(data, at::kByte);
    AllClose(at::ones({}, at::TensorOptions(at::kByte)), res);
  });
}

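// Creates a data placeholder (no device buffer is allocated yet) and verifies
// that it carries the requested device and XLA shape.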
TEST(XLABackendTest, TestPlaceholder) {
  torch::lazy::BackendImplInterface* impl = GetXlaBackendImpl();
  torch::lazy::Shape shape(at::kFloat, {10, 10});
  ForEachDevice([&](const torch::lazy::BackendDevice& device) {
    torch::lazy::BackendDataPtr data =
        impl->CreateDataPlaceholder(device, shape);
    xla::ComputationClient::DataPtr computation_data = UnwrapXlaData(data);
    EXPECT_EQ(computation_data->device(), device.toString());
    EXPECT_EQ(computation_data->shape(),
              MakeXlaShapeFromLazyShape(shape, device));
  });
}

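// Builds a simple XLA computation that adds two parameters of `shape`.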
xla::XlaComputation CreateAddComputation(const xla::Shape& shape) {
  xla::XlaBuilder builder("AddComputation");
  xla::XlaOp x = xla::Parameter(&builder, 0, shape, "x");
  xla::XlaOp y = xla::Parameter(&builder, 1, shape, "y");
  xla::XlaOp sum = xla::Add(x, y);
  return ConsumeValue(builder.Build(sum));
}

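// End-to-end test: compile the add computation, upload the input tensors to
// the device, execute the program, and read back the result.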
TEST(XLABackendTest, TestE2E) {
  torch::lazy::BackendImplInterface* impl = GetXlaBackendImpl();
  xla::Shape input_shape =
      xla::ShapeUtil::MakeShape(xla::PrimitiveType::F32, {8, 8});
  at::Tensor one = at::ones({8, 8}, at::TensorOptions(at::kFloat));
  std::vector<at::Tensor> tensors = {one, one};

  ForEachDevice([&](const torch::lazy::BackendDevice& device) {
    xla::XlaComputation xla_computation = CreateAddComputation(input_shape);
    torch::lazy::ComputationPtr computation =
        std::make_shared<torch_xla::Computation>(
            "test", std::move(xla_computation), device);
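    // Compile the wrapped XLA computation into an executable program.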
    std::vector<torch::lazy::ComputationPtr> compiled_programs =
        impl->Compile({computation});
    EXPECT_EQ(compiled_programs.size(), 1);

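    // Upload each input tensor to the device as computation data.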
    std::vector<torch::lazy::BackendDataPtr> parameters;
    for (auto& tensor : tensors) {
      parameters.push_back(impl->MakeComputationDataFromTensor(
          tensor, torch::lazy::Shape(tensor.scalar_type(), tensor.sizes()),
          device));
    }
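    // Execute the compiled program with the uploaded parameters and verify
    // that the single output equals the element-wise sum of the inputs.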
    std::vector<torch::lazy::BackendDataPtr> res =
        impl->ExecuteComputation(compiled_programs[0], parameters, device);
    EXPECT_EQ(res.size(), 1);
    at::Tensor res_tensor =
        impl->MakeTensorFromComputationData(res[0], at::kFloat);
    AllClose(one + one, res_tensor);
  });
}

}  // namespace cpp_test
}  // namespace torch_xla