|
16 | 16 | import warnings as _warnings |
17 | 17 |
|
18 | 18 | import numpy as _np |
| 19 | +import scipy.linalg as _la |
19 | 20 | import scipy.stats as _stats |
20 | 21 |
|
21 | 22 | from pygsti import optimize as _opt |
@@ -654,16 +655,21 @@ def _project_hessian(self, hessian, nongauge_space, gauge_space, gradient=None): |
654 | 655 | # to transform H -> H' in another coordinate system v -> w = B @ v: |
655 | 656 | # v.T @ H @ v = some 2nd deriv = v.T @ B.T @ H' @ B @ v in another basis |
656 | 657 | # so H' = invB.T @ H @ invB |
657 | | - assert(_np.allclose(hessian, hessian.T)) |
| 658 | + TOL = 1e-7 |
| 659 | + assert(_la.norm(hessian.imag) == 0) |
| 660 | + sym_err_abs = _la.norm(hessian - hessian.T) |
| 661 | + sym_err_rel = sym_err_abs / _la.norm(hessian) |
| 662 | + assert(sym_err_rel < TOL) |
| 663 | + # Symmetrize exactly; build a new array rather than doing += through a transpose view. |
| 664 | + hessian = (hessian + hessian.T) / 2 |
658 | 665 | invB = _np.concatenate([nongauge_space, gauge_space], axis=1) # takes (nongauge,gauge) -> orig coords |
659 | 666 | B = _np.linalg.inv(invB) # takes orig -> (nongauge,gauge) coords |
660 | 667 | Hprime = invB.T @ hessian @ invB |
661 | | - #assert(_np.allclose(Hprime, Hprime.T)) # doesn't handle large magnituge Hessians well |
662 | | - assert(_np.linalg.norm(Hprime - Hprime.T) / _np.linalg.norm(Hprime) < 1e-7) |
| 668 | + assert(_la.norm(Hprime.imag) == 0) |
663 | 669 |
|
664 | 670 | if gradient is not None: # Check that Hprime is block-diagonal -- off-diag should be ~O(gradient) |
665 | 671 | coupling = Hprime[0:nongauge_space.shape[1], nongauge_space.shape[1]:] |
666 | | - if _np.linalg.norm(coupling) / (1e-6 + _np.linalg.norm(gradient)) > 5: |
| 672 | + if _np.linalg.norm(coupling) / (10*TOL + _np.linalg.norm(gradient)) > 5: |
667 | 673 | _warnings.warn("Gauge-nongauge mixed partials have unusually high magnitude: \n" |
668 | 674 | + "|off-diag blk| = %.2g should be ~ |gradient| = %.2g" % |
669 | 675 | (_np.linalg.norm(coupling), _np.linalg.norm(gradient))) |
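
For reference, here is a minimal standalone sketch of the coordinate change this hunk performs, using a hypothetical 3-parameter model with two nongauge directions and one gauge direction. The matrices below are illustrative toy values, not pyGSTi output:

import numpy as np

hessian = np.array([[4.0, 1.0, 0.0],
                    [1.0, 3.0, 0.0],
                    [0.0, 0.0, 0.0]])   # symmetric; flat along the gauge direction
hessian = (hessian + hessian.T) / 2     # exact symmetrization, as in the patch

nongauge_space = np.array([[1.0, 0.0],
                           [0.0, 1.0],
                           [0.0, 0.0]]) # columns span the nongauge directions
gauge_space = np.array([[0.0],
                        [0.0],
                        [1.0]])         # column spans the gauge direction

invB = np.concatenate([nongauge_space, gauge_space], axis=1)  # (nongauge, gauge) -> orig coords
B = np.linalg.inv(invB)                                       # orig -> (nongauge, gauge) coords
Hprime = invB.T @ hessian @ invB                              # H' = invB^T H invB

n = nongauge_space.shape[1]
coupling = Hprime[:n, n:]        # off-diag block; ~O(gradient) near a stationary point
print(np.linalg.norm(coupling))  # 0.0 for this toy example
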
@@ -1013,47 +1019,7 @@ def retrieve_profile_likelihood_confidence_intervals(self, label=None, component |
1013 | 1019 | raise ValueError(("Invalid item label (%s) for computing" % label) |
1014 | 1020 | + "profile likelihood confidence intervals") |
1015 | 1021 |
|
1016 | | - def compute_confidence_interval(self, fn_obj, eps=1e-7, |
1017 | | - return_fn_val=False, verbosity=0): |
1018 | | - """ |
1019 | | - Compute the confidence interval for an arbitrary function. |
1020 | | -
|
1021 | | - This "function", however, must be encapsulated as a |
1022 | | - `ModelFunction` object, which allows it to neatly specify |
1023 | | - what its dependencies are and allows it to compaute finite- |
1024 | | - different derivatives more efficiently. |
1025 | | -
|
1026 | | - Parameters |
1027 | | - ---------- |
1028 | | - fn_obj : ModelFunction |
1029 | | - An object representing the function to evaluate. The |
1030 | | - returned confidence interval is based on linearizing this function |
1031 | | - and propagating the model-space confidence region. |
1032 | | -
|
1033 | | - eps : float, optional |
1034 | | - Step size used when taking finite-difference derivatives of fnOfOp. |
1035 | | -
|
1036 | | - return_fn_val : bool, optional |
1037 | | - If True, return the value of fnOfOp along with it's confidence |
1038 | | - region half-widths. |
1039 | | -
|
1040 | | - verbosity : int, optional |
1041 | | - Specifies level of detail in standard output. |
1042 | | -
|
1043 | | - Returns |
1044 | | - ------- |
1045 | | - df : float or numpy array |
1046 | | - Half-widths of confidence intervals for each of the elements |
1047 | | - in the float or array returned by fnOfOp. Thus, shape of |
1048 | | - df matches that returned by fnOfOp. |
1049 | | - f0 : float or numpy array |
1050 | | - Only returned when return_fn_val == True. Value of fnOfOp |
1051 | | - at the gate specified by op_label. |
1052 | | - """ |
1053 | | - |
1054 | | - nParams = self.model.num_params |
1055 | | - f0 = fn_obj.evaluate(self.model) # function value at "base point" |
1056 | | - |
| 1022 | + def compute_grad_f(self, fn_obj, f0, nParams, eps=1e-7): |
1057 | 1023 | #Get finite difference derivative gradF that is shape (nParams, <shape of f0>) |
1058 | 1024 | gradF = _create_empty_grad_f(f0, nParams) |
1059 | 1025 |
|
@@ -1105,6 +1071,51 @@ def compute_confidence_interval(self, fn_obj, eps=1e-7, |
1105 | 1071 | assert(_np.linalg.norm(_np.imag(f - f0)) < 1e-12 or _np.iscomplexobj(gradF) |
1106 | 1072 | ), "gradF seems to be the wrong type!" |
1107 | 1073 | gradF[igp] = _np.real_if_close(f - f0) / eps |
| 1074 | + return gradF |
| 1075 | + |
| 1076 | + def compute_confidence_interval(self, fn_obj, eps=1e-7, |
| 1077 | + return_fn_val=False, verbosity=0): |
| 1078 | + """ |
| 1079 | + Compute the confidence interval for an arbitrary function. |
| 1080 | +
|
| 1081 | + This "function", however, must be encapsulated as a |
| 1082 | + `ModelFunction` object, which allows it to neatly specify |
| 1083 | + what its dependencies are and allows it to compute finite- |
| 1084 | + difference derivatives more efficiently. |
| 1085 | +
|
| 1086 | + Parameters |
| 1087 | + ---------- |
| 1088 | + fn_obj : ModelFunction |
| 1089 | + An object representing the function to evaluate. The |
| 1090 | + returned confidence interval is based on linearizing this function |
| 1091 | + and propagating the model-space confidence region. |
| 1092 | +
|
| 1093 | + eps : float, optional |
| 1094 | + Step size used when taking finite-difference derivatives of fn_obj. |
| 1095 | +
|
| 1096 | + return_fn_val : bool, optional |
| 1097 | + If True, return the value of fn_obj along with its confidence |
| 1098 | + region half-widths. |
| 1099 | +
|
| 1100 | + verbosity : int, optional |
| 1101 | + Specifies level of detail in standard output. |
| 1102 | +
|
| 1103 | + Returns |
| 1104 | + ------- |
| 1105 | + df : float or numpy array |
| 1106 | + Half-widths of confidence intervals for each of the elements |
| 1107 | + in the float or array returned by fn_obj. Thus, the shape of |
| 1108 | + df matches that returned by fn_obj. |
| 1109 | + f0 : float or numpy array |
| 1110 | + Only returned when return_fn_val == True. Value of fn_obj |
| 1111 | + at the base-point model. |
| 1112 | + """ |
| 1113 | + |
| 1114 | + nParams = self.model.num_params |
| 1115 | + f0 = fn_obj.evaluate(self.model) # function value at "base point" |
| 1116 | + |
| 1117 | + #Get finite difference derivative gradF that is shape (nParams, <shape of f0>) |
| 1118 | + gradF = self.compute_grad_f(fn_obj, f0, nParams, eps) |
1108 | 1119 |
|
1109 | 1120 | return self._compute_return_from_grad_f(gradF, f0, return_fn_val, verbosity) |
1110 | 1121 |
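
The refactor above extracts the finite-difference loop into compute_grad_f so it can be reused outside compute_confidence_interval. Below is a standalone sketch of the same forward-difference pattern; the _ToyModel class and finite_diff_grad helper are hypothetical stand-ins, assuming only an object exposing to_vector()/from_vector() like a pyGSTi model:

import numpy as np

def finite_diff_grad(evaluate, model, f0, eps=1e-7):
    """Forward-difference gradient with shape (num_params,) + shape(f0)."""
    v0 = model.to_vector()
    gradF = np.empty((len(v0),) + np.shape(f0))
    for i in range(len(v0)):
        v = v0.copy()
        v[i] += eps                        # perturb the i-th parameter
        model.from_vector(v)
        gradF[i] = (evaluate(model) - f0) / eps
    model.from_vector(v0)                  # restore the base point
    return gradF

class _ToyModel:                           # hypothetical stand-in for a real model
    def __init__(self, v): self._v = np.asarray(v, dtype=float)
    def to_vector(self): return self._v.copy()
    def from_vector(self, v): self._v = np.asarray(v, dtype=float)

m = _ToyModel([1.0, 2.0])
f = lambda mdl: mdl.to_vector() @ mdl.to_vector()  # f(v) = |v|^2, so grad f = 2v
print(finite_diff_grad(f, m, f(m)))                # approx [2., 4.]
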
|
|