diff --git a/python/tvm/relax/frontend/torch/base_fx_graph_translator.py b/python/tvm/relax/frontend/torch/base_fx_graph_translator.py
index 485b7c088a15..26cb16456fc5 100644
--- a/python/tvm/relax/frontend/torch/base_fx_graph_translator.py
+++ b/python/tvm/relax/frontend/torch/base_fx_graph_translator.py
@@ -1083,7 +1083,17 @@ def _linear(self, node: fx.Node) -> relax.Var:
         weight = args[1]
         bias = args[2] if len(args) > 2 else None
         return self.block_builder.emit(relax.op.linear(x, weight, bias, "float32"))
-
+
+    def _logsigmoid(self, node: fx.Node) -> relax.Var:
+        # logsigmoid(x) = log(sigmoid(x)) = -log(1 + exp(-x))
+        x = self.env[node.args[0]]
+        neg_x = self.block_builder.emit(relax.op.negative(x))
+        exp_neg_x = self.block_builder.emit(relax.op.exp(neg_x))
+        one = relax.const(1.0, dtype="float32")
+        add_one = self.block_builder.emit(relax.op.add(exp_neg_x, one))
+        log_val = self.block_builder.emit(relax.op.log(add_one))
+        return self.block_builder.emit(relax.op.negative(log_val))
+
     def _max_pool1d_impl(
         self,
         x: relax.Expr,
diff --git a/python/tvm/relax/frontend/torch/exported_program_translator.py b/python/tvm/relax/frontend/torch/exported_program_translator.py
index 57a6577eaf4a..09bbbf614cd1 100644
--- a/python/tvm/relax/frontend/torch/exported_program_translator.py
+++ b/python/tvm/relax/frontend/torch/exported_program_translator.py
@@ -354,6 +354,7 @@ def create_convert_map(
             "log1p.default": self._log1p,
             "logical_not.default": self._unary_op(relax.op.logical_not),
             "log_softmax.int": self._log_softmax,
+            "logsigmoid.default": self._logsigmoid,
             "neg.default": self._unary_op(relax.op.negative),
             "pad.default": self._pad,
             "pixel_shuffle.default": self._pixel_shuffle,
diff --git a/tests/python/relax/test_frontend_from_exported_program.py b/tests/python/relax/test_frontend_from_exported_program.py
index dd04833e07b8..856eb0285a8e 100644
--- a/tests/python/relax/test_frontend_from_exported_program.py
+++ b/tests/python/relax/test_frontend_from_exported_program.py
@@ -734,6 +734,40 @@ def main(
     verify_model(LogSoftmax2(), example_args, {}, expected1)
 
 
+def test_logsigmoid():
+    class LogSigmoid(Module):
+        def __init__(self):
+            super().__init__()
+            self.ls = torch.nn.LogSigmoid()
+
+        def forward(self, input):
+            return self.ls(input)
+
+    class LogSigmoid2(Module):
+        def forward(self, input):
+            return torch.nn.functional.logsigmoid(input)
+
+    @tvm.script.ir_module
+    class expected_logsigmoid:
+        @R.function
+        def main(
+            input: R.Tensor((1, 3, 10, 10), dtype="float32")
+        ) -> R.Tuple(R.Tensor((1, 3, 10, 10), dtype="float32")):
+            with R.dataflow():
+                neg_input = R.negative(input)
+                exp_neg = R.exp(neg_input)
+                add_one = R.add(exp_neg, R.const(1.0, "float32"))
+                log_val = R.log(add_one)
+                result = R.negative(log_val)
+                gv: R.Tuple(R.Tensor((1, 3, 10, 10), dtype="float32")) = (result,)
+                R.output(gv)
+            return gv
+
+    example_args = (torch.randn(1, 3, 10, 10, dtype=torch.float32),)
+    verify_model(LogSigmoid(), example_args, {}, expected_logsigmoid)
+    verify_model(LogSigmoid2(), example_args, {}, expected_logsigmoid)
+
+
 def test_prelu():
     class Prelu1(Module):
         def __init__(self, num_parameters=1, alpha=0.25):
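
Reviewer note (not part of the patch): the lowering rests on the identity
logsigmoid(x) = log(sigmoid(x)) = log(1 / (1 + exp(-x))) = -log(1 + exp(-x)).
A minimal standalone sketch that checks this decomposition against PyTorch's
reference implementation; the random input mirrors the shape used in the test
above:

    import torch

    x = torch.randn(1, 3, 10, 10, dtype=torch.float32)
    # The same op sequence the Relax frontend emits: negate, exp, add 1, log, negate.
    decomposed = -torch.log(1.0 + torch.exp(-x))
    reference = torch.nn.functional.logsigmoid(x)
    torch.testing.assert_close(decomposed, reference)

Note that exp(-x) overflows for large negative x, so the decomposed form is
less numerically stable than PyTorch's fused logsigmoid; for the moderate
randn inputs exercised by the test this is not an issue.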