16
16
17
17
18
18
def F (curve , w_t ):
19
- """ The F(γ) operator, defined as F(γ) = W(γ)/L(γ) .
19
+ """ The F(γ) operator, minimization target in the insertion step .
20
20
21
21
Parameters
22
22
----------
23
- curve : DGCG.classes.curve class
24
- w_t : DGCG.classes.dual_variable
23
+ curve : :py:class:`src.classes.curve`
24
+ Curve γ where the F operator is evaluated.
25
+ w_t : :py:class:`src.classes.dual_variable`
26
+ Dual variable that defines the F operator.
25
27
26
28
Returns
27
29
-------
28
- double number.
30
+ float
31
+
29
32
30
33
Notes
31
34
-----
32
- When solving the insertion step, this is the main energy to minimize.
35
+ The F operator is defined via the dual variable as
36
+
37
+ .. math::
38
+         F(\\gamma) = -\\frac{a_{\\gamma}}{T+1} \\sum_{t=0}^T w_t(\\gamma(t))
39
+
33
40
"""
34
41
assert isinstance (curve , classes .curve ) and \
35
42
isinstance (w_t , classes .dual_variable )
@@ -41,19 +48,23 @@ def grad_F(curve, w_t):
41
48
42
49
Parameters
43
50
----------
44
- curve : DGCG.classes.curve class
45
- w_t : DGCG.classes.dual_variable
51
+ curve : :py:class:`src.classes.curve`
52
+ Curve γ where the F operator is evaluated.
53
+ w_t : :py:class:`src.classes.dual_variable`
54
+ Dual variable that defines the F operator.
46
55
47
56
Returns
48
57
-------
49
- double number.
58
+ :py:class:`src.classes.curve`
50
59
51
60
Notes
52
61
-----
53
- We use the gradient to minimize F(γ).
62
+ The F operator is defined on the Hilbert space of curves, therefore the
63
+ gradient should be a curve.
54
64
"""
55
65
assert isinstance (curve , classes .curve ) and \
56
66
isinstance (w_t , classes .dual_variable )
67
+ # F = W/L
57
68
L_gamma = curve .energy ()
58
69
W_gamma = - curve .integrate_against (w_t )
59
70
# ∇L(γ) computation
@@ -68,15 +79,13 @@ def grad_F(curve, w_t):
68
79
w_t_curve = lambda t : w_t .grad_eval (t , curve .eval_discrete (t )).reshape (2 )
69
80
T = config .T
70
81
grad_W_gamma = - np .array ([t_weigh [t ]* w_t_curve (t ) for t in range (T )])
71
- # grad_W_gamma = -np.array([config.time_weights[t] *
72
- # w_t.grad_eval(t, curve.eval_discrete(t)).reshape(2)
73
- # for t in range(config.T)])
74
82
# (L(γ)∇W(γ)-W(γ)∇L(γ))/L(γ)²
75
83
pos_gradient = (L_gamma * grad_W_gamma - W_gamma * grad_L_gamma )/ L_gamma ** 2
76
84
gradient_curve = classes .curve (pos_gradient )
77
85
return gradient_curve
78
86
79
- def after_optimization_sparsifier (current_measure , energy_curves = None ):
87
+
88
+ def after_optimization_sparsifier (current_measure ):
80
89
""" Trims a sparse measure by merging atoms that are too close.
81
90
82
91
Given a measure composed of atoms, it will look for the atoms that are
@@ -85,10 +94,8 @@ def after_optimization_sparsifier(current_measure, energy_curves=None):
85
94
86
95
Parameters
87
96
----------
88
- current_measure : DGCG.classes.measure class
89
- energy_curves : numpy.ndarray, optional
90
- vector indicating the energy of the curves of the measure. To
91
- accelerate the comparisons.
97
+ current_measure : :py:class:`src.classes.measure`
98
+ Target measure to trim.
92
99
93
100
Returns
94
101
-------
@@ -97,9 +104,13 @@ def after_optimization_sparsifier(current_measure, energy_curves=None):
97
104
Notes
98
105
-----
99
106
This method is required because the quadratic optimization step is realized
100
- by an interior point method. Therefore, it is likely to find minimums in
101
- between two identical items instead of selecting one and discarding the
102
- other.
107
+ by an interior point method. Therefore, in the case that there are repeated
108
+ (or very close to repeated) atoms in the current measure, the quadratic
109
+ optimization step can give positive weights to both of them.
110
+
111
+ This is not desirable, since besides incrementing the computing power for
112
+ the sliding step, we would prefer each atom numerically represented only
113
+ once.
103
114
"""
104
115
output_measure = copy .deepcopy (current_measure )
105
116
id1 = 0
@@ -140,6 +151,8 @@ def after_optimization_sparsifier(current_measure, energy_curves=None):
140
151
return output_measure
141
152
142
153
def solve_quadratic_program (current_measure ):
154
+ """
155
+ """
143
156
assert isinstance (current_measure , classes .measure )
144
157
# Build the quadratic system of step 5 and then use some generic python
145
158
# solver to get a solution.
@@ -178,32 +191,6 @@ def solve_quadratic_program(current_measure):
178
191
logger .status ([1 , 2 , 2 ], coefficients )
179
192
return curves_list , coefficients
180
193
181
def to_positive_semidefinite(Q, min_eigval=0.0):
    """ Takes a symmetric matrix and returns a positive semidefinite projection.

    The projection clips every eigenvalue below ``min_eigval`` and rebuilds
    the matrix from the truncated eigendecomposition. If the spectrum already
    satisfies the bound, the input is returned unchanged.

    Parameters
    ----------
    Q : numpy.ndarray
        Symmetric matrix.
    min_eigval : float, optional
        Lower bound imposed on the eigenvalues of the output. Defaults to
        0.0, which yields a plain positive semidefinite projection.

    Returns
    -------
    numpy.ndarray
        Symmetric matrix whose eigenvalues are all >= ``min_eigval``.
    """
    # eigh assumes symmetry: it returns real eigenvalues in ascending order
    # and an orthonormal set of eigenvectors.
    eigval, eigvec = np.linalg.eigh(Q)
    if eigval.min() < min_eigval:
        # truncate — report the offending eigenvalues before clipping
        print("Negative eigenvalues: ",
              [eig for eig in eigval if eig < min_eigval])
        eigval = np.maximum(eigval, min_eigval)
        # Recompute Q = VΣV^(-1) with the clipped spectrum
        Q2 = np.linalg.solve(eigvec.T, np.diag(eigval)@eigvec.T).T
        print("PSD projection relative norm difference:",
              np.linalg.norm(Q - Q2)/np.linalg.norm(Q))
        return Q2
    return Q
206
-
207
194
def weight_optimization_step (current_measure ):
208
195
config .logger .status ([1 , 2 , 1 ])
209
196
# optimizes the coefficients for the current_measure
0 commit comments