1 file changed (+6, -6 lines)

@@ -1119,24 +1119,24 @@ def _aqt_is_int8(aqt):
     """Check if an AffineQuantizedTensor is int8 quantized Tensor"""
     return (
         aqt.layout_tensor.dtype == torch.int8 and
-        aqt.quant_min is None or aqt.quant_min == -128 and
-        aqt.quant_max is None or aqt.quant_max == 127
+        (aqt.quant_min is None or aqt.quant_min == -128) and
+        (aqt.quant_max is None or aqt.quant_max == 127)
     )
 
 def _aqt_is_int8_reduced_range(aqt):
     return (
         aqt.layout_tensor.dtype == torch.int8 and
-        aqt.quant_min == -127 and
-        aqt.quant_max is None or aqt.quant_max == 127
+        (aqt.quant_min is not None and aqt.quant_min == -127) and
+        (aqt.quant_max is None or aqt.quant_max == 127)
     )
 
 def _aqt_is_uint4(aqt):
     """Check if an AffineQuantizedTensor is uint4 quantized Tensor"""
     # TODO: use torch.uint4
     return (
         aqt.layout_tensor.dtype == torch.int32 and
-        aqt.quant_min is None or aqt.quant_min == 0 and
-        aqt.quant_max is None or aqt.quant_max == 15
+        (aqt.quant_min is not None and aqt.quant_min == 0) and
+        (aqt.quant_max is not None and aqt.quant_max == 15)
     )
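The change is the same in all three helpers: Python's `and` binds more tightly than `or`, so the unparenthesized checks did not group the `quant_min`/`quant_max` clauses as intended, and a tensor that fails the dtype check could still satisfy the predicate. The reduced-range and uint4 variants additionally tighten some bounds checks to require explicit values rather than accepting `None`. Below is a minimal sketch of the precedence issue, not part of the PR; the `SimpleNamespace` object is a hypothetical stand-in for an `AffineQuantizedTensor`, and only PyTorch is assumed to be installed.

```python
# Sketch only: demonstrates the operator-precedence bug fixed above.
from types import SimpleNamespace

import torch

# A tensor whose layout dtype is NOT int8 but whose quant range looks int8-like.
fake_aqt = SimpleNamespace(
    layout_tensor=SimpleNamespace(dtype=torch.float16),
    quant_min=-128,
    quant_max=127,
)

def is_int8_old(aqt):
    # Pre-fix form: with A = dtype check, B/C = quant_min clauses, D/E = quant_max
    # clauses, this parses as (A and B) or (C and D) or E because `and` binds
    # more tightly than `or`, so the dtype check can be bypassed.
    return (
        aqt.layout_tensor.dtype == torch.int8 and
        aqt.quant_min is None or aqt.quant_min == -128 and
        aqt.quant_max is None or aqt.quant_max == 127
    )

def is_int8_new(aqt):
    # Post-fix form: A and (B or C) and (D or E), the intended grouping.
    return (
        aqt.layout_tensor.dtype == torch.int8 and
        (aqt.quant_min is None or aqt.quant_min == -128) and
        (aqt.quant_max is None or aqt.quant_max == 127)
    )

print(is_int8_old(fake_aqt))  # True  -- wrongly accepts a non-int8 tensor
print(is_int8_new(fake_aqt))  # False -- dtype mismatch is now rejected
```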