Commit 12b8829

Nick/tensor scale (#221)
* CP_ALS: Should work with TTensor with no changes
    * Update interface for mypy to check
    * Update simple tests to confirm
* Tensor Scale:
    * Add scale and tests
    * Didn't add exclude dims, but could add it to match MATLAB
    * Skipped the function handle since that seems redundant with other functionality we have
* Tensor Scale:
    * Forgot negative test
* Tensor Scale:
    * Typo in my TODO
1 parent 7a5464d commit 12b8829

File tree

5 files changed: +150 -15 lines changed


pyttb/cp_als.py

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@
 
 
 def cp_als( # noqa: PLR0912,PLR0913,PLR0915
-    input_tensor: Union[ttb.tensor, ttb.sptensor],
+    input_tensor: Union[ttb.tensor, ttb.sptensor, ttb.ttensor],
     rank: int,
     stoptol: float = 1e-4,
     maxiters: int = 1000,
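
As a usage sketch (not part of the diff), the widened signature lets CP-ALS run directly on a Tucker tensor; the all-ones core and factors below simply mirror the sample_ttensor fixture added to tests/test_cp_als.py further down.

import numpy as np
import pyttb as ttb

# Small Tucker tensor: 2-way core plus one factor matrix per mode,
# matching the sample_ttensor fixture in this commit.
core = ttb.tensor(np.ones((2, 3)))
factors = [np.ones((5, 2)), np.ones((2, 3))]
T = ttb.ttensor(core, factors)

# cp_als now accepts a ttensor input; this all-ones example is exactly rank 1,
# so the reported fit should be ~1.
M, Minit, output = ttb.cp_als(T, 1)
print(output["fit"])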

pyttb/sptensor.py

Lines changed: 7 additions & 3 deletions
@@ -1055,14 +1055,18 @@ def reshape(
             tuple(np.concatenate((keep_shape, new_shape))),
         )
 
-    def scale(self, factor: np.ndarray, dims: Union[float, np.ndarray]) -> sptensor:
+    def scale(
+        self,
+        factor: Union[np.ndarray, ttb.tensor, ttb.sptensor],
+        dims: Union[float, np.ndarray],
+    ) -> sptensor:
         """
         Scale along specified dimensions for sparse tensors
 
         Parameters
         ----------
-        factor:
-        dims:
+        factor: Scaling factor
+        dims: Dimensions to scale
 
         Returns
         -------

pyttb/tensor.py

Lines changed: 68 additions & 1 deletion
@@ -401,7 +401,7 @@ def find(self) -> Tuple[np.ndarray, np.ndarray]:
         [3 4]]
        >>> T_threshold = T > 2
        >>> subs, vals = T_threshold.find()
-       >>> subs
+       >>> subs.astype(int)
        array([[1, 0],
               [1, 1]])
        >>> vals
@@ -1019,6 +1019,73 @@ def reshape(self, shape: Tuple[int, ...]) -> tensor:
 
         return ttb.tensor(np.reshape(self.data, shape, order="F"), shape)
 
+    def scale(
+        self,
+        factor: Union[np.ndarray, ttb.tensor],
+        dims: Union[float, np.ndarray],
+    ) -> tensor:
+        """
+        Scale along specified dimensions for tensors.
+
+        Parameters
+        ----------
+        factor: Scaling factor
+        dims: Dimensions to scale
+
+        Returns
+        -------
+        Scaled Tensor.
+
+        Examples
+        --------
+        >>> T = ttb.tenones((3, 4, 5))
+        >>> S = np.arange(5)
+        >>> Y = T.scale(S, 2)
+        >>> Y.data[0, 0, :]
+        array([0., 1., 2., 3., 4.])
+        >>> S = ttb.tensor(np.arange(5))
+        >>> Y = T.scale(S, 2)
+        >>> Y.data[0, 0, :]
+        array([0., 1., 2., 3., 4.])
+        >>> S = ttb.tensor(np.arange(12), shape=(3, 4))
+        >>> Y = T.scale(S, [0, 1])
+        >>> Y.data[:, :, 0]
+        array([[ 0.,  3.,  6.,  9.],
+               [ 1.,  4.,  7., 10.],
+               [ 2.,  5.,  8., 11.]])
+        """
+        if isinstance(dims, list):
+            dims = np.array(dims)
+        elif isinstance(dims, (float, int, np.generic)):
+            dims = np.array([dims])
+
+        # TODO update tt_dimscheck overload so I don't need explicit
+        # Nones to appease mypy
+        dims, _ = tt_dimscheck(self.ndims, None, dims, None)
+        remdims = np.setdiff1d(np.arange(0, self.ndims), dims)
+
+        if not np.array_equal(factor.shape, np.array(self.shape)[dims]):
+            raise ValueError(
+                f"Scaling factor has shape {factor.shape}, but dimensions "
+                f"to scale had shape {np.array(self.shape)[dims]}"
+            )
+        if isinstance(factor, np.ndarray):
+            if len(factor.shape) == 1:
+                factor = factor[:, None]
+            factor = ttb.tensor(factor, copy=False)
+        # TODO this should probably be doable directly as a numpy view
+        # where I think this is currently a copy
+        vector_factor = ttb.tenmat.from_tensor_type(
+            factor, np.arange(factor.ndims)
+        ).double()
+        vector_self = ttb.tenmat.from_tensor_type(self, dims, remdims).double()
+        # Numpy broadcasting should be equivalent to bsxfun
+        result = vector_self * vector_factor
+        # TODO why do we need this transpose for things to work?
+        if len(dims) == 1:
+            result = result.transpose()
+        return ttb.tenmat.from_data(result, dims, remdims, self.shape).to_tensor()
+
     def squeeze(self) -> Union[tensor, np.ndarray, float]:
         """
         Removes singleton dimensions from the tensor.
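
The new scale method matricizes both operands with tenmat and relies on NumPy broadcasting in place of MATLAB's bsxfun. A minimal sketch of what that multiply amounts to for the first docstring example, checked against a plain NumPy broadcast (illustrative only, not part of the commit):

import numpy as np
import pyttb as ttb

T = ttb.tenones((3, 4, 5))
S = np.arange(5)

# Scaling along dim 2 multiplies entry (i, j, k) by S[k] ...
Y = T.scale(S, 2)

# ... which for the dense data is just a broadcast over the trailing axis.
expected = T.data * S
assert np.array_equal(Y.data, expected)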

tests/test_cp_als.py

Lines changed: 50 additions & 10 deletions
@@ -27,12 +27,37 @@ def sample_sptensor():
     return data, sptensorInstance
 
 
+@pytest.fixture()
+def sample_ttensor():
+    """Simple TTENSOR to verify by hand"""
+    core = ttb.tensor(np.ones((2, 3)))
+    factors = [
+        np.ones((5, 2)),
+        np.ones((2, 3)),
+    ]
+    ttensorInstance = ttb.ttensor(core, factors)
+    return ttensorInstance
+
+
+@pytest.fixture()
+def random_ttensor():
+    """Arbitrary TTENSOR to verify consistency between alternative operations"""
+    core = ttb.tensor(np.random.random((2, 3, 4)))
+    factors = [
+        np.random.random((5, 2)),
+        np.random.random((2, 3)),
+        np.random.random((4, 4)),
+    ]
+    ttensorInstance = ttb.ttensor(core, factors)
+    return ttensorInstance
+
+
 @pytest.mark.indevelopment
 def test_cp_als_tensor_default_init(capsys, sample_tensor):
     (data, T) = sample_tensor
     (M, Minit, output) = ttb.cp_als(T, 2)
     capsys.readouterr()
-    assert pytest.approx(output["fit"], 1) == 0
+    assert pytest.approx(output["fit"]) == 1
 
 
 @pytest.mark.indevelopment
@@ -49,7 +74,7 @@ def test_cp_als_tensor_ktensor_init(capsys, sample_tensor):
     KInit = ttb.ktensor.from_function(np.random.random_sample, T.shape, 2)
     (M, Minit, output) = ttb.cp_als(T, 2, init=KInit)
     capsys.readouterr()
-    assert pytest.approx(output["fit"], 1) == 0
+    assert pytest.approx(output["fit"]) == 1
 
 
 @pytest.mark.indevelopment
@@ -76,7 +101,7 @@ def test_cp_als_sptensor_default_init(capsys, sample_sptensor):
     (data, T) = sample_sptensor
     (M, Minit, output) = ttb.cp_als(T, 2)
     capsys.readouterr()
-    assert pytest.approx(output["fit"], 1) == 0
+    assert pytest.approx(output["fit"]) == 1
 
 
 @pytest.mark.indevelopment
@@ -93,7 +118,26 @@ def test_cp_als_sptensor_ktensor_init(capsys, sample_sptensor):
     KInit = ttb.ktensor.from_function(np.random.random_sample, T.shape, 2)
     (M, Minit, output) = ttb.cp_als(T, 2, init=KInit)
     capsys.readouterr()
-    assert pytest.approx(output["fit"], 1) == 0
+    assert pytest.approx(output["fit"]) == 1
+
+
+@pytest.mark.indevelopment
+def test_cp_als_ttensor_default_init(capsys, sample_ttensor):
+    T = sample_ttensor
+    (M, Minit, output) = ttb.cp_als(T, 1)
+    capsys.readouterr()
+    assert pytest.approx(output["fit"]) == 1
+
+
+@pytest.mark.indevelopment
+def test_cp_als_ttensor_default_init_consistency(capsys, random_ttensor):
+    T = random_ttensor
+    KInit = ttb.ktensor.from_function(np.random.random_sample, T.shape, 2)
+    _, _, output = ttb.cp_als(T, 2, init=KInit)
+    capsys.readouterr()
+    _, _, dense_output = ttb.cp_als(T.full(), 2, init=KInit)
+    capsys.readouterr()
+    assert pytest.approx(output["fit"]) == dense_output["fit"]
 
 
 @pytest.mark.indevelopment
@@ -102,19 +146,15 @@ def test_cp_als_tensor_dimorder(capsys, sample_tensor):
 
     # default dimorder
     dimorder = [i for i in range(T.ndims)]
-    print(dimorder)
-    print(dimorder.__class__)
     (M, Minit, output) = ttb.cp_als(T, 2, dimorder=dimorder)
     capsys.readouterr()
-    assert pytest.approx(output["fit"], 1) == 0
+    assert pytest.approx(output["fit"]) == 1
 
     # reverse should work
     dimorder = [T.ndims - i - 1 for i in range(T.ndims)]
-    print(dimorder)
-    print(dimorder.__class__)
     (M, Minit, output) = ttb.cp_als(T, 2, dimorder=dimorder)
     capsys.readouterr()
-    assert pytest.approx(output["fit"], 1) == 0
+    assert pytest.approx(output["fit"]) == 1
 
     # dimorder not a list
     with pytest.raises(AssertionError) as excinfo:

tests/test_tensor.py

Lines changed: 24 additions & 0 deletions
@@ -1090,6 +1090,30 @@ def test_tensor_mask(sample_tensor_2way):
     assert "Mask cannot be bigger than the data tensor" in str(excinfo)
 
 
+def test_tensor_scale():
+    T = ttb.tenones((3, 4, 5))
+    S = np.arange(5, dtype=float)
+    Y = T.scale(S, 2)
+    assert np.array_equal(Y.data[0, 0, :], S)
+
+    S = ttb.tensor(np.arange(5, dtype=float))
+    Y = T.scale(S, 2)
+    assert np.array_equal(Y.data[0, 0, :], S.data)
+
+    S = ttb.tensor(np.arange(12, dtype=float), shape=(3, 4))
+    Y = T.scale(S, [0, 1])
+    assert np.array_equal(Y.data[:, :, 0], S.data)
+
+    S = ttb.tensor(np.arange(60, dtype=float), shape=(3, 4, 5))
+    Y = T.scale(S, [0, 1, 2])
+    assert np.array_equal(Y.data, S.data)
+
+    # Negative test
+    with pytest.raises(ValueError):
+        S = ttb.tensor(np.arange(60, dtype=float), shape=(3, 4, 5))
+        Y = T.scale(S, 0)
+
+
 def test_tensor_squeeze(sample_tensor_2way):
     (params, tensorInstance) = sample_tensor_2way
 
