import numpy as np
import itertools

from .ralgb5 import ralgb5
from .utils import rad, mid, inf, sup, mag


#############################################################################################################
#############################################################################################################
class BaseRecFun(object):

    @staticmethod
    def linear_penalty(x, linear_constraint):
        C, b = linear_constraint.C, linear_constraint.b
        mu = linear_constraint.mu

        n, m = C.shape
        # the arrays that store the values of the penalty function
        # and its Jacobian are initialized
        arr_p, arr_dp = np.zeros(n), np.zeros((n, m))
        for i in range(n):
            # test whether the vector x violates the i-th constraint C[i] @ x <= b[i]
            Cix, beyondQ = linear_constraint.largeCondQ(x, i)
            if beyondQ:
                arr_p[i] = Cix - b[i]
                arr_dp[i] = C[i]

        # the final value of the penalty function and its gradient vector are obtained
        p = mu * np.sum(arr_p)
        dp = mu * np.sum(arr_dp, axis=0)
        return p, dp
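
    # A minimal usage sketch (illustrative, not part of the public API): given
    # a LinearConstraint instance lc whose weighting factor lc.mu is set, the
    # penalty value and its gradient at a point x are obtained as
    #     p, dp = BaseRecFun.linear_penalty(x, lc)
    # where p >= 0 and dp has the same length as x.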


    @staticmethod
    def optimize(A, b, recfunc, x0=None, weight=None, linear_constraint=None, **kwargs):

        n, m = A.shape
        assert n == len(b), 'Inconsistent dimensions of matrix and right-hand side vector'

        infA, supA = inf(A), sup(A)
        Am, Ar = mid(A), rad(A)
        bm, br = mid(b), rad(b)

        if weight is None:
            weight = np.ones(n)

        # for the penalty function alpha = sum(G x - c), where G is the constraint matrix
        # and c is the constraint vector, we find the value of the weighting coefficient mu
        # that guarantees the iterates do not go beyond the constraints
        if linear_constraint is None:
            calcfg = lambda x: recfunc.calcfg(x, infA, supA, Am, Ar, bm, br, weight)
        else:
            if linear_constraint.mu is None:
                mag_value = mag(A)
                linear_constraint.mu = linear_constraint.find_mu(np.max(mag_value))

            calcfg = lambda x: recfunc.calcfg_constr(x, infA, supA, Am, Ar, bm, br, weight, linear_constraint)


        if x0 is None:
            Ac = np.array(Am, dtype=np.float64)
            bc = np.array(bm, dtype=np.float64)

            # use the least-squares solution of the midpoint system as the
            # starting point, unless the midpoint matrix is ill-conditioned
            sv = np.linalg.svd(Ac, compute_uv=False)
            minsv, maxsv = np.min(sv), np.max(sv)

            if minsv != 0 and maxsv / minsv < 1e15:
                x0 = np.linalg.lstsq(Ac, bc, rcond=-1)[0]
            else:
                x0 = np.zeros(m)
        else:
            x0 = np.copy(x0)

        return ralgb5(calcfg, x0, **kwargs)


    @classmethod
    def constituent(cls, A, b, x, weight=None):
        """
        Computes all the constituents (formings) of the recognizing functional
        at the point x and returns them.

        Parameters:

        A: Interval
            The input interval matrix of the ISLAE, which can be either square or rectangular.

        b: Interval
            The interval vector of the right-hand side of the ISLAE.

        x: np.array
            The point at which the recognizing functional is calculated.

        weight: float, np.array, optional
            The vector of weight coefficients for each constituent of the recognizing
            functional. By default, it is a vector consisting of ones.


        Returns:

        out: np.array
            The value of each constituent of the recognizing functional at the point x.
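
        Examples:

        A minimal sketch; it assumes a hypothetical concrete subclass (here
        called ``Tol``) implementing ``_constituent``, and an ``Interval``
        constructor taking arrays of lower and upper endpoints; neither name
        is defined in this module:

            import numpy as np
            A = Interval([[0.5, 1.0]], [[1.5, 2.0]])
            b = Interval([1.0], [3.0])
            vals = Tol.constituent(A, b, np.zeros(2))  # one value per equation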
        """
        return cls._constituent(A, b, x, weight=weight)


    @classmethod
    def value(cls, A, b, x, weight=None):
        """
        Computes the value of the recognizing functional at the point x.

        Parameters:

        A: Interval
            The input interval matrix of the ISLAE, which can be either square or rectangular.

        b: Interval
            The interval vector of the right-hand side of the ISLAE.

        x: np.array
            The point at which the recognizing functional is calculated.

        weight: float, np.array, optional
            The vector of weight coefficients for each constituent of the recognizing
            functional. By default, it is a vector consisting of ones.


        Returns:

        out: float
            The value of the recognizing functional at the point x.
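
        Examples:

        A minimal sketch under the same assumptions as in ``constituent``
        (a hypothetical concrete subclass ``Tol`` and an ``Interval``
        endpoint constructor):

            import numpy as np
            A = Interval([[0.5, 1.0]], [[1.5, 2.0]])
            b = Interval([1.0], [3.0])
            v = Tol.value(A, b, np.zeros(2))  # a single float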
        """
        return cls._value(A, b, x, weight=weight)


    @classmethod
    def maximize(cls, A, b, x0=None, weight=None, linear_constraint=None, **kwargs):
        """
        Finds the global maximum of the recognizing functional.
        The ralgb5 subgradient method is used for the optimization.

        Parameters:

        A: Interval
            The input interval matrix of the ISLAE, which can be either square or rectangular.

        b: Interval
            The interval vector of the right-hand side of the ISLAE.

        x0: np.array, optional
            The initial guess for the point at which the maximum is attained. By default,
            x0 is the solution (pseudo-solution) of the midpoint system mid(A) x = mid(b).

        weight: float, np.array, optional
            The vector of weight coefficients for each constituent of the recognizing
            functional. By default, it is a vector consisting of ones.

        linear_constraint: LinearConstraint, optional
            The system lb <= C x <= ub describing a linear dependence between the parameters.
            By default, the unconstrained maximization problem is solved.

        kwargs: optional params
            Additional parameters passed to ralgb5 to adjust its behaviour, such as the
            step size, the stopping criteria and the maximum number of iterations.
            They are specified in the description of the ralgb5 function.


        Returns:

        out: tuple
            The function returns the following values in the specified order:
            1. the vector at which the recognizing functional attains its maximum,
            2. the value of the recognizing functional at that point,
            3. the number of iterations taken by the algorithm,
            4. the number of calls to the calcfg function,
            5. the exit code of the algorithm (1 = tolf, 2 = tolg, 3 = tolx, 4 = maxiter, 5 = error).
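
        Examples:

        A minimal sketch, again assuming a hypothetical concrete subclass
        ``Tol`` and an ``Interval`` endpoint constructor:

            import numpy as np
            A = Interval([[0.5, 1.0]], [[1.5, 2.0]])
            b = Interval([1.0], [3.0])
            xr, fr, nit, ncalls, ccode = Tol.maximize(A, b)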
        """
        return cls._maximize(A, b, x0=x0, weight=weight, linear_constraint=linear_constraint, **kwargs)


#############################################################################################################
#############################################################################################################
class LinearConstraint:
    """
    Linear constraint on the variables.

    The constraint has the general inequality form:
        lb <= C x <= ub


    Parameters:

    C: array_like, shape (n, m)
        Matrix defining the constraint.

    lb: array_like, shape (n, ), optional
        Lower bounds on the constraint. Defaults to lb = -np.inf (no bounds).

    ub: array_like, shape (n, ), optional
        Upper bounds on the constraint. Defaults to ub = np.inf (no bounds).

    mu: float, optional
        The weighting factor by which the penalty is multiplied. If None (the default),
        it is computed automatically via find_mu during the optimization.
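
    Example:

    A sketch of a box constraint -1 <= x_i <= 1 on two variables:

        import numpy as np
        lc = LinearConstraint(np.eye(2), lb=-np.ones(2), ub=np.ones(2))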
    """


    def __init__(self, C, lb=None, ub=None, mu=None):
        # TODO
        # the constraints are converted to the form C x <= b;
        # all rows with an infinite bound, as well as rows with no dependence
        # on x, have to be removed

        assert C.shape[0] >= 1, 'Inconsistent dimension of matrix'
        if (lb is not None) or (ub is not None):
            self.C, self.b = [], []
            if lb is not None:
                assert C.shape[0] == len(lb), 'Inconsistent dimensions of matrix and lower bound vector'
                for k in range(C.shape[0]):
                    if abs(lb[k]) != np.inf and C[k].any():
                        self.C.append(-C[k])
                        self.b.append(-lb[k])

            if ub is not None:
                assert C.shape[0] == len(ub), 'Inconsistent dimensions of matrix and upper bound vector'
                for k in range(C.shape[0]):
                    if abs(ub[k]) != np.inf and C[k].any():
                        self.C.append(C[k])
                        self.b.append(ub[k])

            n = len(self.C)
            if n == 0:
                # all rows were filtered out, so keep a single vacuous constraint
                self.C, self.b = np.array([C[0]]), np.array([np.inf])
            else:
                # each inequality is scaled by a random weight from [1, 2)
                w = np.random.uniform(1, 2, n)
                self.C, self.b = np.array(self.C) * w[:, None], np.array(self.b) * w

        else:
            # no bounds were given, so keep a single vacuous constraint
            self.C, self.b = np.array([C[0]]), np.array([np.inf])

        self.mu = mu

    def largeCondQ(self, x, i):
        # returns C[i] @ x and whether the i-th constraint C[i] @ x <= b[i] is violated
        Cix = self.C[i] @ x
        return Cix, Cix > self.b[i]

    def find_mu(self, numerator):
        # for each column k, minimize |sum_i C[i, k] * x_i| over binary vectors
        # x in {0, 1}^n with sum(x) >= 1
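        # NOTE: this enumerates all 2^n binary vectors over the n nonzero
        # entries of each column, so the cost grows exponentially with n;
        # e.g. for a column c = [2, -3] the candidate sums are 2, -3 and -1,
        # and the smallest magnitude is 1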
        denominator = []
        for c in self.C.T:
            c = c[c != 0]
            n = c.shape[0]
            if n == 0:
                continue

            dot = np.zeros(2 ** n)
            k = 0
            for x in itertools.product([0, 1], repeat=n):
                dot[k] = c @ x
                k += 1
            # dot[0] corresponds to the all-zero vector and is excluded
            denominator.append(min(abs(dot[1:])))

        self.mu = numerator / min(denominator)
        return self.mu