Commit 2a77457

Minor fix for logical operators precedence in _aqt_is_* checks.
1 parent bd264f9 commit 2a77457

1 file changed: +6 -6

torchao/dtypes/affine_quantized_tensor.py

Lines changed: 6 additions & 6 deletions
@@ -1119,24 +1119,24 @@ def _aqt_is_int8(aqt):
     """Check if an AffineQuantizedTensor is int8 quantized Tensor"""
     return (
         aqt.layout_tensor.dtype == torch.int8 and
-        aqt.quant_min is None or aqt.quant_min == -128 and
-        aqt.quant_max is None or aqt.quant_max == 127
+        (aqt.quant_min is None or aqt.quant_min == -128) and
+        (aqt.quant_max is None or aqt.quant_max == 127)
     )
 
 def _aqt_is_int8_reduced_range(aqt):
     return (
         aqt.layout_tensor.dtype == torch.int8 and
-        aqt.quant_min == -127 and
-        aqt.quant_max is None or aqt.quant_max == 127
+        (aqt.quant_min is not None and aqt.quant_min == -127) and
+        (aqt.quant_max is None or aqt.quant_max == 127)
     )
 
 def _aqt_is_uint4(aqt):
     """Check if an AffineQuantizedTensor is uint4 quantized Tensor"""
     # TODO: use torch.uint4
     return (
         aqt.layout_tensor.dtype == torch.int32 and
-        aqt.quant_min is None or aqt.quant_min == 0 and
-        aqt.quant_max is None or aqt.quant_max == 15
+        (aqt.quant_min is not None and aqt.quant_min == 0) and
+        (aqt.quant_max is not None and aqt.quant_max == 15)
     )
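For context on why the parentheses matter: in Python, "and" binds more tightly than "or", so the old expression A and B or C and D or E groups as (A and B) or (C and D) or E rather than the intended A and (B or C) and (D or E). The snippet below is a standalone sketch, not torchao code, using hypothetical stand-in values for aqt.layout_tensor.dtype, aqt.quant_min, and aqt.quant_max to show how the old grouping can accept a tensor that is not int8.

# Standalone sketch (not torchao code) of the precedence bug the commit fixes.
# Hypothetical stand-ins for aqt.layout_tensor.dtype, aqt.quant_min, aqt.quant_max:
dtype_is_int8 = False   # tensor is NOT int8, so both checks should return False
quant_min = None
quant_max = 127

# Old, unparenthesized form (as it looked before the commit):
old = (
    dtype_is_int8 and
    quant_min is None or quant_min == -128 and
    quant_max is None or quant_max == 127
)

# Fixed form with explicit parentheses (as introduced by the commit):
new = (
    dtype_is_int8 and
    (quant_min is None or quant_min == -128) and
    (quant_max is None or quant_max == 127)
)

print(old)  # True  -- the trailing "or quant_max == 127" bypasses the dtype check
print(new)  # False -- correctly rejects the non-int8 tensor

The same reasoning applies to _aqt_is_int8_reduced_range and _aqt_is_uint4; note that for _aqt_is_uint4 the diff also makes the bounds check stricter, requiring quant_min and quant_max to be set explicitly (is not None) instead of accepting None.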
