Merged
3 changes: 3 additions & 0 deletions .gitignore
@@ -15,6 +15,9 @@ torch_xla/csrc/XLANativeFunctions.h
 torch_xla/csrc/RegisterXLA.cpp
 torch_xla/csrc/RegisterAutogradXLA.cpp
+
+# Directory autogenerated by full_codegen
+torch_xla/csrc/generated/
Collaborator (Author):
I noticed the new generated directory getting tracked by git, so I've added it to the ignore list. Let me know if this is okay.

Collaborator:
Thanks!


 # BEGIN NOT-CLEAN-FILES (setup.py handles this marker. Do not change.)
 #
 # Below files are not deleted by "setup.py clean".
11 changes: 0 additions & 11 deletions torch_xla/csrc/aten_xla_type.cpp
@@ -517,17 +517,6 @@ at::Tensor XLANativeFunctions::_unsafe_view(const at::Tensor& self,
   return view(self, size);
 }

-at::Tensor XLANativeFunctions::acos(const at::Tensor& self) {
-  XLA_FN_COUNTER("xla::");
-  return bridge::AtenFromXlaTensor(XLATensor::acos(bridge::GetXlaTensor(self)));
-}
-
-at::Tensor XLANativeFunctions::acosh(const at::Tensor& self) {
-  XLA_FN_COUNTER("xla::");
-  return bridge::AtenFromXlaTensor(
-      XLATensor::acosh(bridge::GetXlaTensor(self)));
-}
-
 at::Tensor XLANativeFunctions::add(const at::Tensor& self,
                                    const at::Tensor& other,
                                    const at::Scalar& alpha) {
2 changes: 0 additions & 2 deletions torch_xla/csrc/ops/ops.cpp
@@ -67,8 +67,6 @@ namespace torch_xla {
                     std::move(lower_fn));                                   \
 }

-PTXLA_UNARY_OP(Acos, at::aten::acos, xla::Acos);
-PTXLA_UNARY_OP(Acosh, at::aten::acosh, xla::Acosh);
 PTXLA_UNARY_OP(Cos, at::aten::cos, xla::Cos);
 PTXLA_UNARY_OP(Cosh, at::aten::cosh, xla::Cosh);
 PTXLA_UNARY_OP(Asin, at::aten::asin, xla::Asin);
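The two deleted lines were instances of PTXLA_UNARY_OP, the old hand-written registration path for trivial unary ops; acos and acosh now get their IR nodes from codegen instead. For context, a rough sketch of what one instantiation expanded to (an assumption reconstructed from the truncated macro body above, not the PR's literal code):

// Sketch (assumption): approximate expansion of
// PTXLA_UNARY_OP(Acos, at::aten::acos, xla::Acos) — a factory returning a
// generic IR node whose lowering applies the given XLA builder function.
torch::lazy::NodePtr Acos(const XlaValue& input) {
  auto lower_fn = [](const XlaNode& node,
                     LoweringContext* loctx) -> XlaOpVector {
    xla::XlaOp xla_input = loctx->GetOutputOp(node.operand(0));
    return node.ReturnOp(xla::Acos(xla_input), loctx);
  };
  return GenericOp(torch::lazy::OpKind(at::aten::acos), {input},
                   input.xla_shape(), std::move(lower_fn));
}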
11 changes: 11 additions & 0 deletions torch_xla/csrc/ops/ops_lower_fn.cpp
@@ -1,5 +1,6 @@
 #include <torch_xla/csrc/generated/LazyIr.h>

+#include "tensorflow/compiler/xla/client/lib/math.h"
 #include "torch_xla/csrc/elementwise.h"
 #include "torch_xla/csrc/helpers.h"

@@ -10,6 +11,16 @@ torch_xla::XlaOpVector Abs::Lower(LoweringContext* loctx) const {
   return ReturnOp(BuildAbs(xla_input), loctx);
 }

+torch_xla::XlaOpVector Acos::Lower(LoweringContext* loctx) const {
+  xla::XlaOp xla_input = loctx->GetOutputOp(operand(0));
+  return ReturnOp(xla::Acos(xla_input), loctx);
+}
+
+torch_xla::XlaOpVector Acosh::Lower(LoweringContext* loctx) const {
+  xla::XlaOp xla_input = loctx->GetOutputOp(operand(0));
+  return ReturnOp(xla::Acosh(xla_input), loctx);
+}
+
 torch_xla::XlaOpVector Maximum::Lower(LoweringContext* loctx) const {
   xla::XlaOp xla_input = loctx->GetOutputOp(operand(0));
   xla::XlaOp xla_other = loctx->GetOutputOp(operand(1));
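Every op migrated to full_codegen gets a hand-written Lower like the two above: fetch the operand's XlaOp from the lowering context, apply the XLA client-library helper (hence the new math.h include), and return it as a single-op vector. As a sketch of the pattern, here is how a hypothetical follow-up migration of asin (still on the PTXLA_UNARY_OP path in this PR) would look:

// Sketch (assumption): not part of this PR; a hypothetical future migration
// of asin following exactly the same unary lowering pattern.
torch_xla::XlaOpVector Asin::Lower(LoweringContext* loctx) const {
  xla::XlaOp xla_input = loctx->GetOutputOp(operand(0));
  return ReturnOp(xla::Asin(xla_input), loctx);
}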
4 changes: 4 additions & 0 deletions torch_xla/csrc/ops/ops_xla_shape_fn.cpp
@@ -6,6 +6,10 @@ namespace torch_xla {

 xla::Shape AbsOutputShape(const XlaValue& input) { return input.xla_shape(); }

+xla::Shape AcosOutputShape(const XlaValue& input) { return input.xla_shape(); }
+
+xla::Shape AcoshOutputShape(const XlaValue& input) { return input.xla_shape(); }
+
 xla::Shape MaximumOutputShape(const XlaValue& input, const XlaValue& other) {
   auto lower_for_shape_fn =
       [&](absl::Span<const xla::XlaOp> operands) -> xla::XlaOp {
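For elementwise unary ops the output shape is just the input shape, as the two new one-liners show. Ops with broadcasting or type promotion, like the truncated MaximumOutputShape above, instead infer the shape by running the lowering on shape-only operands. A sketch of that pattern (assuming torch_xla's usual InferOutputShape helper; the PR's literal body is truncated here):

// Sketch (assumption): the shape-inference pattern for multi-input ops. The
// lambda performs a real lowering; InferOutputShape evaluates it on dummy
// operands built from the input shapes to recover the result shape.
xla::Shape MaximumOutputShape(const XlaValue& input, const XlaValue& other) {
  auto lower_for_shape_fn =
      [&](absl::Span<const xla::XlaOp> operands) -> xla::XlaOp {
    auto promoted = XlaHelpers::Promote(operands[0], operands[1]);
    return xla::Max(promoted.first, promoted.second);
  };
  return InferOutputShape({input.xla_shape(), other.xla_shape()},
                          lower_for_shape_fn);
}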
4 changes: 4 additions & 0 deletions torch_xla/csrc/ops/ops_xla_shape_fn.h
@@ -5,6 +5,10 @@ namespace torch_xla {

 xla::Shape AbsOutputShape(const XlaValue& input);

+xla::Shape AcosOutputShape(const XlaValue& input);
+
+xla::Shape AcoshOutputShape(const XlaValue& input);
+
 xla::Shape MaximumOutputShape(const XlaValue& input, const XlaValue& other);

 } // namespace torch_xla
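The Acos and Acosh classes whose Lower methods appear in ops_lower_fn.cpp are not written by hand; the full_codegen pipeline emits them into torch_xla/csrc/generated/LazyIr.h (the directory newly added to .gitignore). Roughly, a generated node looks like this (a sketch; the exact constructor signature and members of the emitted class are an assumption):

// Hypothetical sketch of a codegen-emitted IR node in generated/LazyIr.h.
// The shape comes from the hand-written AcosOutputShape declared above;
// Lower() is declared here and defined by hand in ops_lower_fn.cpp.
class Acos : public XlaNode {
 public:
  Acos(const XlaValue& input)
      : XlaNode(torch::lazy::OpKind(at::aten::acos), {input},
                [&]() { return AcosOutputShape(input); },
                /*num_outputs=*/1) {}

  torch_xla::XlaOpVector Lower(LoweringContext* loctx) const override;
};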
4 changes: 0 additions & 4 deletions torch_xla/csrc/tensor.h
@@ -334,10 +334,6 @@ class XLATensor : public c10::intrusive_ptr_target {

   static XLATensor abs(const XLATensor& input);

-  static XLATensor acos(const XLATensor& input);
-
-  static XLATensor acosh(const XLATensor& input);
-
   static XLATensor add(
       const XLATensor& input, const XLATensor& other, const at::Scalar& alpha,
       c10::optional<at::ScalarType> logical_element_type = c10::nullopt);
8 changes: 0 additions & 8 deletions torch_xla/csrc/tensor_methods.cpp
@@ -643,14 +643,6 @@ XLATensor XLATensor::abs(const XLATensor& input) {
       input.GetIrValue(), std::vector<torch::lazy::Shape>()));
 }

-XLATensor XLATensor::acos(const XLATensor& input) {
-  return input.CreateFrom(Acos(input.GetIrValue()));
-}
-
-XLATensor XLATensor::acosh(const XLATensor& input) {
-  return input.CreateFrom(Acosh(input.GetIrValue()));
-}
-
 XLATensor XLATensor::add(const XLATensor& input, const XLATensor& other,
                          const at::Scalar& alpha,
                          c10::optional<at::ScalarType> logical_element_type) {
4 changes: 2 additions & 2 deletions xla_native_functions.yaml
@@ -1,6 +1,8 @@
 backend: XLA
 cpp_namespace: torch_xla
 full_codegen:
+  - acos
+  - acosh
   - abs
   - maximum
 supported:
@@ -30,8 +32,6 @@ supported:
   - _to_cpu
   - _trilinear
   - _unsafe_view
-  - acos
-  - acosh
   - adaptive_max_pool2d
   - adaptive_max_pool2d_backward
   - add.Scalar
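Moving acos and acosh from supported: to full_codegen: is what makes all the deletions above possible: the codegen now emits the XLANativeFunctions dispatcher entry, the IR node class, and the tensor-method plumbing, leaving only the Lower and *OutputShape functions to be written by hand. A sketch of what the generated dispatcher entry plausibly does, mirroring the deleted hand-written version (an assumption; the emitted code may differ in detail):

// Hypothetical sketch of the codegen-emitted replacement for the deleted
// hand-written XLANativeFunctions::acos from aten_xla_type.cpp.
at::Tensor XLANativeFunctions::acos(const at::Tensor& self) {
  XLA_FN_COUNTER("xla::");
  XLATensor input = bridge::GetXlaTensor(self);
  // Build the generated Acos IR node and wrap it back into an ATen tensor.
  return bridge::AtenFromXlaTensor(
      input.CreateFrom(torch::lazy::MakeNode<Acos>(input.GetIrValue())));
}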