hexa (UTC+1) | _______ TestKernelLinearOperatorLinOpReturn.test_solve_matrix_broadcast ________
self = <test.operators.test_kernel_linear_operator.TestKernelLinearOperatorLinOpReturn testMethod=test_solve_matrix_broadcast>
def test_solve_matrix_broadcast(self):
linear_op = self.create_linear_op()
# Right hand side has one more batch dimension
batch_shape = torch.Size((3, *linear_op.batch_shape))
rhs = torch.randn(*batch_shape, linear_op.size(-1), 5)
self._test_solve(rhs)
if linear_op.ndimension() > 2:
# Right hand side has one fewer batch dimension
batch_shape = torch.Size(linear_op.batch_shape[1:])
rhs = torch.randn(*batch_shape, linear_op.size(-1), 5)
> self._test_solve(rhs)
linear_operator/test/linear_operator_test_case.py:1115:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
linear_operator/test/linear_operator_test_case.py:615: in _test_solve
self.assertAllClose(arg.grad, arg_copy.grad, **self.tolerances["grad"])
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <test.operators.test_kernel_linear_operator.TestKernelLinearOperatorLinOpReturn testMethod=test_solve_matrix_broadcast>
tensor1 = tensor([[[[ 1.8514e+04, 7.1797e+03, -1.1073e+04, -6.6690e+03, 1.2985e+04,
6.8468e+03],
[ 1.685... -3.0153e+04],
[-9.0042e+04, -1.3429e+04, -3.1822e+04, 1.3839e+04, 5.9735e+04,
-5.4315e+04]]]])
tensor2 = tensor([[[[ 1.8514e+04, 7.1797e+03, -1.1073e+04, -6.6690e+03, 1.2985e+04,
6.8468e+03],
[ 1.685... -3.0153e+04],
[-9.0042e+04, -1.3429e+04, -3.1822e+04, 1.3839e+04, 5.9735e+04,
-5.4315e+04]]]])
rtol = 0.03, atol = 1e-05, equal_nan = False
def assertAllClose(self, tensor1, tensor2, rtol=1e-4, atol=1e-5, equal_nan=False):
if not tensor1.shape == tensor2.shape:
raise ValueError(f"tensor1 ({tensor1.shape}) and tensor2 ({tensor2.shape}) do not have the same shape.")
if torch.allclose(tensor1, tensor2, rtol=rtol, atol=atol, equal_nan=equal_nan):
return True
if not equal_nan:
if not torch.equal(tensor1, tensor1):
raise AssertionError(f"tensor1 ({tensor1.shape}) contains NaNs")
if not torch.equal(tensor2, tensor2):
raise AssertionError(f"tensor2 ({tensor2.shape}) contains NaNs")
rtol_diff = (torch.abs(tensor1 - tensor2) / torch.abs(tensor2)).view(-1)
rtol_diff = rtol_diff[torch.isfinite(rtol_diff)]
rtol_max = rtol_diff.max().item()
atol_diff = (torch.abs(tensor1 - tensor2) - torch.abs(tensor2).mul(rtol)).view(-1)
atol_diff = atol_diff[torch.isfinite(atol_diff)]
atol_max = atol_diff.max().item()
> raise AssertionError(
f"tensor1 ({tensor1.shape}) and tensor2 ({tensor2.shape}) are not close enough. \n"
f"max rtol: {rtol_max:0.8f}\t\tmax atol: {atol_max:0.8f}"
)
E AssertionError: tensor1 (torch.Size([2, 3, 4, 6])) and tensor2 (torch.Size([2, 3, 4, 6])) are not close enough.
E max rtol: 0.03577567 max atol: 0.00741313
linear_operator/test/base_test_case.py:46: AssertionError
| 11:40:36 |