Skip to content

Commit bf50f6e

Browse files
committed
Merge branch 'remove-unused-function-defs' of https://github.com/pyGSTio/pyGSTi into remove-unused-function-defs
2 parents c19a9f4 + 8e5bde8 commit bf50f6e

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

68 files changed

+3936
-841
lines changed

pygsti/algorithms/core.py

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -31,8 +31,7 @@
3131
from pygsti.modelmembers import states as _state
3232
from pygsti.circuits.circuitlist import CircuitList as _CircuitList
3333
from pygsti.baseobjs.resourceallocation import ResourceAllocation as _ResourceAllocation
34-
from pygsti.optimize.customlm import CustomLMOptimizer as _CustomLMOptimizer
35-
from pygsti.optimize.customlm import Optimizer as _Optimizer
34+
from pygsti.optimize.simplerlm import Optimizer as _Optimizer, SimplerLMOptimizer as _SimplerLMOptimizer
3635
from pygsti import forwardsims as _fwdsims
3736
from pygsti import layouts as _layouts
3837

@@ -619,7 +618,7 @@ def run_gst_fit_simple(dataset, start_model, circuits, optimizer, objective_func
619618
model : Model
620619
the best-fit model.
621620
"""
622-
optimizer = optimizer if isinstance(optimizer, _Optimizer) else _CustomLMOptimizer.cast(optimizer)
621+
optimizer = optimizer if isinstance(optimizer, _Optimizer) else _SimplerLMOptimizer.cast(optimizer)
623622
objective_function_builder = _objfns.ObjectiveFunctionBuilder.cast(objective_function_builder)
624623
array_types = optimizer.array_types + \
625624
objective_function_builder.compute_array_types(optimizer.called_objective_methods, start_model.sim)
@@ -666,7 +665,7 @@ def run_gst_fit(mdc_store, optimizer, objective_function_builder, verbosity=0):
666665
objfn_store : MDCObjectiveFunction
667666
the objective function and store containing the best-fit model evaluated at the best-fit point.
668667
"""
669-
optimizer = optimizer if isinstance(optimizer, _Optimizer) else _CustomLMOptimizer.cast(optimizer)
668+
optimizer = optimizer if isinstance(optimizer, _Optimizer) else _SimplerLMOptimizer.cast(optimizer)
670669
comm = mdc_store.resource_alloc.comm
671670
profiler = mdc_store.resource_alloc.profiler
672671
printer = VerbosityPrinter.create_printer(verbosity, comm)
@@ -843,7 +842,7 @@ def iterative_gst_generator(dataset, start_model, circuit_lists,
843842
(an "evaluated" model-dataset-circuits store).
844843
"""
845844
resource_alloc = _ResourceAllocation.cast(resource_alloc)
846-
optimizer = optimizer if isinstance(optimizer, _Optimizer) else _CustomLMOptimizer.cast(optimizer)
845+
optimizer = optimizer if isinstance(optimizer, _Optimizer) else _SimplerLMOptimizer.cast(optimizer)
847846
comm = resource_alloc.comm
848847
profiler = resource_alloc.profiler
849848
printer = VerbosityPrinter.create_printer(verbosity, comm)

pygsti/algorithms/gaugeopt.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -290,7 +290,7 @@ def gaugeopt_custom(model, objective_fn, gauge_group=None,
290290
gaugeGroupEl = gauge_group.compute_element(x0) # re-used element for evals
291291

292292
def _call_objective_fn(gauge_group_el_vec, oob_check=False):
293-
# Note: oob_check can be True if oob_check_interval>=1 is given to the custom_leastsq below
293+
# Note: oob_check can be True if oob_check_interval>=1 is given to the simplish_leastsq below
294294
gaugeGroupEl.from_vector(gauge_group_el_vec)
295295
return objective_fn(gaugeGroupEl, oob_check)
296296

@@ -309,7 +309,7 @@ def _call_jacobian_fn(gauge_group_el_vec):
309309
assert(_call_jacobian_fn is not None), "Cannot use 'ls' method unless jacobian is available"
310310
ralloc = _baseobjs.ResourceAllocation(comm) # FUTURE: plumb up a resource alloc object?
311311
test_f = _call_objective_fn(x0)
312-
solnX, converged, msg, _, _, _, _, _ = _opt.custom_leastsq(
312+
solnX, converged, msg, _, _, _, _ = _opt.simplish_leastsq(
313313
_call_objective_fn, _call_jacobian_fn, x0, f_norm2_tol=tol,
314314
jac_norm_tol=tol, rel_ftol=tol, rel_xtol=tol,
315315
max_iter=maxiter, resource_alloc=ralloc,

pygsti/baseobjs/label.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -817,6 +817,10 @@ def __reduce__(self):
817817
# Need to tell serialization logic how to create a new Label since it's derived
818818
# from the immutable tuple type (so cannot have its state set after creation)
819819
return (LabelStr, (str(self), self.time), None)
820+
821+
def __contains__(self, x):
822+
#need to get a string rep of the tested label.
823+
return str(x) in str(self)
820824

821825
def to_native(self):
822826
"""

pygsti/circuits/circuit.py

Lines changed: 18 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -549,6 +549,24 @@ def _copy_init(self, labels, line_labels, editable, name='', stringrep=None, occ
549549

550550
return self
551551

552+
#pickle management functions
553+
def __getstate__(self):
554+
state_dict = self.__dict__
555+
#if state_dict.get('_hash', None) is not None:
556+
# del state_dict['_hash'] #don't store the hash, recompute at unpickling time
557+
return state_dict
558+
559+
def __setstate__(self, state_dict):
560+
for k, v in state_dict.items():
561+
self.__dict__[k] = v
562+
if self.__dict__['_static']:
563+
#reinitialize the hash
564+
if self.__dict__.get('_hashable_tup', None) is not None:
565+
self._hash = hash(self._hashable_tup)
566+
else: #legacy support
567+
self._hashable_tup = self.tup
568+
self._hash = hash(self._hashable_tup)
569+
552570

553571
def to_label(self, nreps=1):
554572
"""
@@ -636,7 +654,6 @@ def layertup(self):
636654
if self._static:
637655
return self._labels
638656
else:
639-
#return tuple([to_label(layer_lbl) for layer_lbl in self._labels])
640657
return tuple([layer_lbl if isinstance(layer_lbl, _Label)
641658
else _Label(layer_lbl) for layer_lbl in self._labels])
642659
@property

pygsti/circuits/cloudcircuitconstruction.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2164,7 +2164,7 @@ def bitstr(num_qubits, bit):
21642164
def invert(bstr):
21652165
return [(0 if x else 1) for x in bstr]
21662166

2167-
half = [bitstr(n, k) for k in range(int(_np.ceil(_np.math.log(n, 2))))]
2167+
half = [bitstr(n, k) for k in range(int(_np.ceil(_np.log2(n))))]
21682168
other_half = [invert(bstr) for bstr in half]
21692169
return half + other_half
21702170

pygsti/evotypes/densitymx/effectcreps.cpp

Lines changed: 9 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -147,7 +147,7 @@ namespace CReps_densitymx {
147147
finalIndx += ((finds >> k) & 1) * 3 * base;
148148
base = base >> 2; // /= 4 so base == 4**(N-1-k)
149149
}
150-
150+
151151
//Apply result
152152
if(parity(finds & _zvals_int))
153153
ret -= _abs_elval * state->_dataptr[finalIndx]; // minus sign
@@ -157,15 +157,14 @@ namespace CReps_densitymx {
157157
return ret;
158158
}
159159

160-
INT EffectCRep_Computational::parity(INT x) {
161-
// int64-bit specific
162-
x = (x & 0x00000000FFFFFFFF)^(x >> 32);
163-
x = (x & 0x000000000000FFFF)^(x >> 16);
164-
x = (x & 0x00000000000000FF)^(x >> 8);
165-
x = (x & 0x000000000000000F)^(x >> 4);
166-
x = (x & 0x0000000000000003)^(x >> 2);
167-
x = (x & 0x0000000000000001)^(x >> 1);
168-
return x & 1; // return the last bit (0 or 1)
160+
inline INT EffectCRep_Computational::parity(INT x) {
161+
x ^= (x >> 32);
162+
x ^= (x >> 16);
163+
x ^= (x >> 8);
164+
x ^= (x >> 4);
165+
x ^= (x >> 2);
166+
x ^= (x >> 1);
167+
return x & 1; // Return the last bit
169168
}
170169

171170

pygsti/evotypes/evotype.py

Lines changed: 39 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import importlib as _importlib
22

33
from . import basereps as _basereps
4+
from pygsti.baseobjs.statespace import StateSpace as _StateSpace
45

56

67
class Evotype(object):
@@ -50,10 +51,46 @@ class Evotype(object):
5051
}
5152

5253
@classmethod
53-
def cast(cls, obj, default_prefer_dense_reps=False):
54+
def cast(cls, obj, default_prefer_dense_reps=None, state_space=None):
55+
"""
56+
Cast the specified object to an Evotype with options for default Evotype
57+
handling.
58+
59+
Parameters
60+
----------
61+
obj : Evotype or str
62+
Object to cast to an Evotype. If already an Evotype the object is simply
63+
returned. Otherwise if a string we attempt to cast it to a recognized
64+
evotype option. If the string "default" is passed in then we determine
65+
the type of evotype used in conjunction with the two optional kwargs below.
66+
67+
default_prefer_dense_reps : None or bool, optional (default None)
68+
Flag to indicate preference for dense representation types when casting
69+
a string. If None then there is no preference and this will be determined
70+
by the optional state_space kwarg, if present. Otherwise if a boolean value
71+
this selection overrides any logic based on the state space.
72+
73+
state_space : StateSpace, optional (default None)
74+
If not None then the dimension of the state space is used to determine whether
75+
or not to prefer the use of dense representation types when not already specified
76+
by the default_prefer_dense_reps kwarg.
77+
78+
Returns
79+
-------
80+
Evotype
81+
"""
5482
if isinstance(obj, Evotype):
5583
return obj
56-
elif obj == "default":
84+
85+
if default_prefer_dense_reps is None:
86+
if state_space is None:
87+
default_prefer_dense_reps = False #reproduces legacy behavior.
88+
else:
89+
if not isinstance(state_space, _StateSpace):
90+
raise ValueError('state_space must be a StateSpace object.')
91+
default_prefer_dense_reps = False if state_space.dim > 64 else True #HARDCODED
92+
93+
if obj == "default":
5794
return Evotype(cls.default_evotype, default_prefer_dense_reps)
5895
else: # assume obj is a string naming an evotype
5996
return Evotype(str(obj), default_prefer_dense_reps)

pygsti/forwardsims/mapforwardsim.py

Lines changed: 30 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -159,7 +159,7 @@ def _array_types_for_method(cls, method_name):
159159
if method_name == 'bulk_fill_timedep_dchi2': return ('p',) # just an additional parameter vector
160160
return super()._array_types_for_method(method_name)
161161

162-
def __init__(self, model=None, max_cache_size=0, num_atoms=None, processor_grid=None, param_blk_sizes=None,
162+
def __init__(self, model=None, max_cache_size=None, num_atoms=None, processor_grid=None, param_blk_sizes=None,
163163
derivative_eps=1e-7, hessian_eps=1e-5):
164164
#super().__init__(model, num_atoms, processor_grid, param_blk_sizes)
165165
_DistributableForwardSimulator.__init__(self, model, num_atoms, processor_grid, param_blk_sizes)
@@ -195,7 +195,9 @@ def copy(self):
195195
self._processor_grid, self._pblk_sizes)
196196

197197
def create_layout(self, circuits, dataset=None, resource_alloc=None, array_types=('E',),
198-
derivative_dimensions=None, verbosity=0, layout_creation_circuit_cache=None):
198+
derivative_dimensions=None, verbosity=0, layout_creation_circuit_cache=None,
199+
circuit_partition_cost_functions=('size', 'propagations'),
200+
load_balancing_parameters=(1.15,.1)):
199201
"""
200202
Constructs a circuit-outcome-probability-array (COPA) layout for a list of circuits.
201203
@@ -226,11 +228,22 @@ def create_layout(self, circuits, dataset=None, resource_alloc=None, array_types
226228
Determines how much output to send to stdout. 0 means no output, higher
227229
integers mean more output.
228230
229-
layout_creation_circuit_cache:
230-
A precomputed dictionary serving as a cache for completed
231-
circuits. I.e. circuits with prep labels and POVM labels appended.
232-
Along with other useful pre-computed circuit structures used in layout
233-
creation.
231+
layout_creation_circuit_cache : dict, optional (default None)
232+
A precomputed dictionary serving as a cache for completed circuits. I.e. circuits
233+
with prep labels and POVM labels appended. Along with other useful pre-computed
234+
circuit structures used in layout creation.
235+
236+
circuit_partition_cost_functions : tuple of str, optional (default ('size', 'propagations'))
237+
A tuple of strings denoting cost function to use in each of the two stages of the algorithm
238+
for determining the partitions of the complete circuit set amongst atoms.
239+
Allowed options are 'size', which corresponds to balancing the number of circuits,
240+
and 'propagations', which corresponds to balancing the number of state propagations.
241+
242+
load_balancing_parameters : tuple of floats, optional (default (1.15, .1))
243+
A tuple of floats used as load balancing parameters when splitting a layout across atoms,
244+
as in the multi-processor setting when using MPI. These parameters correspond to the `imbalance_threshold`
245+
and `minimum_improvement_threshold` parameters described in the method `find_splitting_new`
246+
of the `PrefixTable` class.
234247
235248
Returns
236249
-------
@@ -256,15 +269,15 @@ def create_layout(self, circuits, dataset=None, resource_alloc=None, array_types
256269
raise MemoryError("Attempted layout creation w/memory limit = %g <= 0!" % mem_limit)
257270
printer.log("Layout creation w/mem limit = %.2fGB" % (mem_limit * C))
258271

259-
#Start with how we'd like to split processors up (without regard to memory limit):
260-
261-
# when there are lots of processors, the from_vector calls dominate over the actual fwdsim,
262-
# but we can reduce from_vector calls by having np1, np2 > 0 (each param requires a from_vector
263-
# call when using finite diffs) - so we want to choose nc = Ng < nprocs and np1 > 1 (so nc * np1 = nprocs).
264-
#work_per_proc = self.model.dim**2
272+
#Start with how we'd like to split processors up (without regard to memory limit):
273+
#The current implementation of map (should) benefit more from having a matching between the number of atoms
274+
#and the number of processors, at least for up to around two-qubits.
275+
default_natoms = nprocs # heuristic
276+
#TODO: factor in the mem_limit value to more intelligently set the default number of atoms.
265277

266278
natoms, na, npp, param_dimensions, param_blk_sizes = self._compute_processor_distribution(
267-
array_types, nprocs, num_params, len(circuits), default_natoms=2 * self.model.dim) # heuristic?
279+
array_types, nprocs, num_params, len(circuits), default_natoms=default_natoms)
280+
268281
printer.log(f'Num Param Processors {npp}')
269282

270283
printer.log("MapLayout: %d processors divided into %s (= %d) grid along circuit and parameter directions." %
@@ -273,8 +286,9 @@ def create_layout(self, circuits, dataset=None, resource_alloc=None, array_types
273286
assert(_np.prod((na,) + npp) <= nprocs), "Processor grid size exceeds available processors!"
274287

275288
layout = _MapCOPALayout(circuits, self.model, dataset, self._max_cache_size, natoms, na, npp,
276-
param_dimensions, param_blk_sizes, resource_alloc, verbosity,
277-
layout_creation_circuit_cache= layout_creation_circuit_cache)
289+
param_dimensions, param_blk_sizes, resource_alloc,circuit_partition_cost_functions,
290+
verbosity, layout_creation_circuit_cache= layout_creation_circuit_cache,
291+
load_balancing_parameters=load_balancing_parameters)
278292

279293
if mem_limit is not None:
280294
loc_nparams1 = num_params / npp[0] if len(npp) > 0 else 0

0 commit comments

Comments
 (0)