diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md
index 4c0b3875..b90dfbc7 100644
--- a/.github/CHANGELOG.md
+++ b/.github/CHANGELOG.md
@@ -15,6 +15,8 @@
 * Simplifies the internal working of Williamson decomposition [(#366)](https://github.com/XanaduAI/thewalrus/pull/338).
 
+* Improves the handling of an edge case in Takagi [(#373)](https://github.com/XanaduAI/thewalrus/pull/373).
+
 ### Bug fixes
 
 ### Documentation
 
diff --git a/thewalrus/decompositions.py b/thewalrus/decompositions.py
index b96ffdbb..43967077 100644
--- a/thewalrus/decompositions.py
+++ b/thewalrus/decompositions.py
@@ -210,8 +210,10 @@ def takagi(A, svd_order=True):
         list_vals.sort(reverse=svd_order)
         sorted_ls, permutation = zip(*list_vals)
         return np.array(sorted_ls), Uc[:, np.array(permutation)]
-
-    phi = np.angle(A[0, 0])
+    # Find the element with the largest absolute value
+    pos = np.unravel_index(np.argmax(np.abs(A)), (n, n))
+    # Use it to find whether the input is a global phase times a real matrix
+    phi = np.angle(A[pos])
     Amr = np.real_if_close(np.exp(-1j * phi) * A)
     if np.isrealobj(Amr):
         vals, U = takagi(Amr, svd_order=svd_order)
diff --git a/thewalrus/tests/test_decompositions.py b/thewalrus/tests/test_decompositions.py
index 630eb32a..53975dd2 100644
--- a/thewalrus/tests/test_decompositions.py
+++ b/thewalrus/tests/test_decompositions.py
@@ -394,3 +394,24 @@ def test_real_degenerate():
     rl, U = takagi(mat)
     assert np.allclose(U @ U.conj().T, np.eye(len(mat)))
     assert np.allclose(U @ np.diag(rl) @ U.T, mat)
+
+
+@pytest.mark.parametrize("size", [10, 20, 100])
+def test_flat_phase(size):
+    """Test that the correct decomposition is obtained even if the first entry is 0"""
+    A = np.random.rand(size, size) + 1j * np.random.rand(size, size)
+    A += A.T
+    A[0, 0] = 0
+    l, u = takagi(A)
+    assert np.allclose(A, u * l @ u.T)
+
+
+def test_real_input_edge():
+    """Adapted from https://math.stackexchange.com/questions/4418925/why-does-this-algorithm-for-the-takagi-factorization-fail-here"""
+    rng = np.random.default_rng(0)  # Important for reproducibility
+    A = (rng.random((100, 100)) - 0.5) * 114
+    A = A * A.T  # make A symmetric
+    l, u = takagi(A)
+    # Now reconstruct A from the decomposition and compare
+    Ar = u * l @ u.T
+    assert np.allclose(A, Ar)
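
A minimal standalone sketch (not part of the patch) of the edge case this change addresses: `takagi` checks whether the input is a real symmetric matrix times a global phase, and the old code read that phase off `A[0, 0]`, which tells it nothing when that entry happens to be zero. The helper name `detect_global_phase`, the seed, and the matrix size below are illustrative assumptions; only `takagi` comes from `thewalrus.decompositions`.

```python
# Illustrative sketch only -- not part of the diff above.
import numpy as np
from thewalrus.decompositions import takagi


def detect_global_phase(A):
    """Phase of the largest-magnitude entry of A (hypothetical helper,
    mirroring the new logic inside takagi)."""
    pos = np.unravel_index(np.argmax(np.abs(A)), A.shape)
    return np.angle(A[pos])


# Build a global phase times a real symmetric matrix, arranged so A[0, 0] == 0.
rng = np.random.default_rng(42)
R = rng.standard_normal((4, 4))
R = R + R.T
R[0, 0] = 0.0
A = np.exp(1j * 0.3) * R

phi_old = np.angle(A[0, 0])        # 0.0, because A[0, 0] is exactly zero
phi_new = detect_global_phase(A)   # 0.3 or 0.3 - pi, depending on the entry's sign

# The old phase fails to strip the global factor; the new one leaves a matrix
# that is real up to floating-point noise absorbed by np.real_if_close.
print(np.isrealobj(np.real_if_close(np.exp(-1j * phi_old) * A)))  # expected: False
print(np.isrealobj(np.real_if_close(np.exp(-1j * phi_new) * A)))  # expected: True

# The patched takagi reconstructs A, mirroring the new tests in the diff.
l, u = takagi(A)
assert np.allclose(A, (u * l) @ u.T)
```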