diff --git a/CHANGELOG b/CHANGELOG
index 031fb36ad..299897a33 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,5 +1,17 @@
 # CHANGELOG
 
+## [0.9.14.2] - 2025-11-08
+
+This is a bugfix release patching the following issues:
+
+* Bugfixes for diamond-distance wildcard budget computation in the context of leakage-aware analysis, as reported in #652. (#671)
+* Changes to the default behavior of the leakage-aware gauge optimization suite to address how relational leakage errors are attributed to each gate. Additional correctness checks/unit tests for leakage GST modeling. (#671)
+* Bugfix for the issue reported in #644, where instrument parameters were being ignored when gauge-optimizing models with instruments using fidelity or trace distance as the cost function. Note that this was originally thought to apply to the Frobenius distance as well, but that turned out not to be true and the Frobenius case was working correctly. (#672)
+* Fix for issue #600, which found that the `Model.is_equivalent` method did not work for 'full TP' models. (#657)
+* tqdm added as a proper dependency. (#653, #656)
+* Fixes for shared-memory bugs in objective function computation when parallelizing across parameters. (#660, #674)
+* Fixes a bug in DataSet count retrieval and adds unit tests. (#663)
+
 ## [0.9.14.1] - 2025-08-30
 
 This is a bugfix release patching two issues:
diff --git a/README.md b/README.md
index fc0324f8e..62181928b 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
 ********************************************************************************
-    pyGSTi 0.9.14.1
+    pyGSTi 0.9.14.2
 ********************************************************************************
 
 [![master build](https://img.shields.io/github/actions/workflow/status/sandialabs/pyGSTi/beta-master.yml?branch=master&label=master)](https://github.com/sandialabs/pyGSTi/actions/workflows/beta-master.yml)
diff --git a/pygsti/algorithms/core.py b/pygsti/algorithms/core.py
index 81871d5e9..0edcd6939 100644
--- a/pygsti/algorithms/core.py
+++ b/pygsti/algorithms/core.py
@@ -15,6 +15,7 @@
 import time as _time
 import copy as _copy
 import warnings as _warnings
+from typing import Union
 
 import numpy as _np
 import scipy.optimize as _spo
@@ -121,7 +122,7 @@ def run_lgst(dataset, prep_fiducials, effect_fiducials, target_model, op_labels=
     # We would like to get X or it's gauge equivalent.  # noqa
     # We do: 1) (I^-1)*AXB ~= B^-1 X B := Xhat -- we solve Ii*A*B = identity for Ii  # noqa
     #        2) B * Xhat * B^-1 ==> X  (but what if B is non-invertible -- say rectangular) Want B*(something) ~ identity ??  # noqa
-    # for lower rank target models, want a gauge tranformation that brings Xhat => X of "increased dim" model  # noqa
+    # for lower rank target models, want a gauge transformation that brings Xhat => X of "increased dim" model  # noqa
     # want "B^-1" such that B(gsDim,nRhoSpecs) "B^-1"(nRhoSpecs,gsDim) ~ Identity(gsDim)  # noqa
     # Ub,sb,Vb = svd(B) so B = Ub*diag(sb)*Vb where Ub = (gsDim,M), s = (M,M), Vb = (M,prepSpecs)  # noqa
     # if B^-1 := VbT*sb^-1*Ub^-1 then B*B^-1 = I(gsDim)  # noqa
@@ -386,7 +387,7 @@ def _lgst_matrix_dims(model, prep_fiducials, effect_fiducials):
     nRhoSpecs = len(prep_fiducials)  # no instruments allowed in prep_fiducials
     povmLbls = [model.split_circuit(s, ('povm',))[2]  # povm_label
                 for s in effect_fiducials]
-    povmLens = ([len(model.povms[l]) for l in povmLbls])
+    povmLens = ([len(model.povms[povm_label]) for povm_label in povmLbls])
     nESpecs = sum(povmLens)
     return nRhoSpecs, nESpecs, povmLbls, povmLens
@@ -725,7 +726,7 @@ def run_iterative_gst(dataset, start_model, circuit_lists,
     circuit_lists : list of lists of (tuples or Circuits)
         The i-th element is a list of the circuits to be used in the i-th iteration
-        of the optimization.  Each element of these lists is a circuit, specifed as
+        of the optimization.  Each element of these lists is a circuit, specified as
         either a Circuit object or as a tuple of operation labels (but all must
         be specified using the same type).
         e.g. [ [ (), ('Gx',) ], [ (), ('Gx',), ('Gy',) ], [ (), ('Gx',), ('Gy',), ('Gx','Gy') ] ]
@@ -785,7 +786,8 @@ def run_iterative_gst(dataset, start_model, circuit_lists,
     return models, optimums, final_objfn, mdc_store_list
 
 def iterative_gst_generator(dataset, start_model, circuit_lists,
-                            optimizer, iteration_objfn_builders, final_objfn_builders,
+                            optimizer: Union[_SimplerLMOptimizer, dict, list[_SimplerLMOptimizer], list[dict]],
+                            iteration_objfn_builders, final_objfn_builders,
                             resource_alloc, starting_index=0, verbosity=0):
     """
     Performs Iterative Gate Set Tomography on the dataset.
@@ -804,14 +806,17 @@ def iterative_gst_generator(dataset, start_model, circuit_lists,
     circuit_lists : list of lists of (tuples or Circuits)
         The i-th element is a list of the circuits to be used in the i-th iteration
-        of the optimization.  Each element of these lists is a circuit, specifed as
+        of the optimization.  Each element of these lists is a circuit, specified as
         either a Circuit object or as a tuple of operation labels (but all must
         be specified using the same type).
         e.g. [ [ (), ('Gx',) ], [ (), ('Gx',), ('Gy',) ], [ (), ('Gx',), ('Gy',), ('Gx','Gy') ] ]
-
-    optimizer : Optimizer or dict
+
+    optimizer : Optimizer, dict, list of Optimizer, or list of dict
         The optimizer to use, or a dictionary of optimizer parameters
-        from which a default optimizer can be built.
+        from which a default optimizer can be built.  If a list, its length
+        should be either 1 or equal to the number of iterations.  If length 1,
+        the single optimizer is used for every iteration; otherwise each
+        optimizer is used for its corresponding iteration.
 
     iteration_objfn_builders : list
         List of ObjectiveFunctionBuilder objects defining which objective functions
@@ -847,7 +852,25 @@ def iterative_gst_generator(dataset, start_model, circuit_lists,
        (an "evaluated" model-dataset-circuits store).
""" resource_alloc = _ResourceAllocation.cast(resource_alloc) - optimizer = optimizer if isinstance(optimizer, _Optimizer) else _SimplerLMOptimizer.cast(optimizer) + if optimizer is None: + optimizer = _SimplerLMOptimizer.cast(None) + if isinstance(optimizer,list): + if len(optimizer) == 1: + optimizer = optimizer*len(circuit_lists) + if isinstance(optimizer, (_Optimizer, dict)): + optimizers = [optimizer]*len(circuit_lists) + + elif not isinstance(optimizer, list): + raise ValueError(f'Invalid argument for optimizers of type {type(optimizer)}, supported types are list, Optimizer, or dict.') + else: + optimizers = optimizer + + assert len(optimizers) == 1 or len(optimizers) == len(circuit_lists), f'Optimizers must be length 1 or length {len(circuit_lists)=}' + + temp_optimizers = [] + for opt in optimizers: + temp_optimizers.append(opt if isinstance(opt, _Optimizer) else _SimplerLMOptimizer.cast(opt)) + optimizers = temp_optimizers comm = resource_alloc.comm profiler = resource_alloc.profiler printer = VerbosityPrinter.create_printer(verbosity, comm) @@ -872,8 +895,8 @@ def _max_array_types(artypes_list): # get the maximum number of each array type #These lines were previously in the loop below, but we should be able to move it out from there so we can use it #in precomputing layouts: - method_names = optimizer.called_objective_methods - array_types = optimizer.array_types + \ + method_names = optimizers[0].called_objective_methods + array_types = optimizers[0].array_types + \ _max_array_types([builder.compute_array_types(method_names, mdl.sim) for builder in iteration_objfn_builders + final_objfn_builders]) @@ -929,11 +952,11 @@ def _max_array_types(artypes_list): # get the maximum number of each array type for j, obj_fn_builder in enumerate(iteration_objfn_builders): tNxt = _time.time() if i == 0 and j == 0: # special case: in first optimization run, use "first_fditer" - first_iter_optimizer = _copy.deepcopy(optimizer) # use a separate copy of optimizer, as it - first_iter_optimizer.fditer = optimizer.first_fditer # is a persistent object (so don't modify!) + first_iter_optimizer = _copy.deepcopy(optimizers[i]) # use a separate copy of optimizer, as it + first_iter_optimizer.fditer = optimizers[i].first_fditer # is a persistent object (so don't modify!) 
                opt_result, mdc_store = run_gst_fit(mdc_store, first_iter_optimizer, obj_fn_builder, printer - 1)
             else:
-                opt_result, mdc_store = run_gst_fit(mdc_store, optimizer, obj_fn_builder, printer - 1)
+                opt_result, mdc_store = run_gst_fit(mdc_store, optimizers[i], obj_fn_builder, printer - 1)
             profiler.add_time('run_iterative_gst: iter %d %s-opt' % (i + 1, obj_fn_builder.name), tNxt)
             tNxt = _time.time()
@@ -946,7 +969,7 @@ def _max_array_types(artypes_list):  # get the maximum number of each array type
             for j, obj_fn_builder in enumerate(final_objfn_builders):
                 tNxt = _time.time()
                 mdl.basis = start_model.basis
-                opt_result, mdc_store = run_gst_fit(mdc_store, optimizer, obj_fn_builder, printer - 1)
+                opt_result, mdc_store = run_gst_fit(mdc_store, optimizers[i], obj_fn_builder, printer - 1)
                 profiler.add_time('run_iterative_gst: final %s opt' % obj_fn_builder.name, tNxt)
                 tNxt = _time.time()
                 printer.log("Final optimization took %.1fs\n" % (tNxt - tRef), 2)
diff --git a/pygsti/protocols/gst.py b/pygsti/protocols/gst.py
index df4417bbf..84e573ab3 100644
--- a/pygsti/protocols/gst.py
+++ b/pygsti/protocols/gst.py
@@ -21,7 +21,7 @@
 import numpy as _np
 from scipy.stats import chi2 as _chi2
-from typing import Optional
+from typing import Optional, Union, Any
 
 from pygsti.baseobjs.profiler import DummyProfiler as _DummyProfiler
 from pygsti.baseobjs.nicelyserializable import NicelySerializable as _NicelySerializable
@@ -195,7 +195,7 @@ class StandardGSTDesign(GateSetTomographyDesign):
         for each germ-power the selected pairs are *different* random
         sets of all possible pairs (unlike fid_pairs, which specifies the
         *same* fiducial pairs for *all* same-germ base strings).  If
-        fid_pairs is used in conjuction with keep_fraction, the pairs
+        fid_pairs is used in conjunction with keep_fraction, the pairs
         specified by fid_pairs are always selected, and any additional
         pairs are randomly selected.
@@ -472,7 +472,7 @@ def retrieve_model(self, edesign, gaugeopt_target, dataset, comm):
        Data used to execute LGST when needed.
 
    comm : mpi4py.MPI.Comm
-        A MPI communicator to divide workload amoung multiple processors.
+        An MPI communicator to divide workload among multiple processors.
 
    Returns
    -------
@@ -672,7 +672,7 @@ def cast(cls, obj):
         """
         if isinstance(obj, GSTBadFitOptions):
             return obj
-        else:  # assum obj is a dict of arguments
+        else:  # assume obj is a dict of arguments
             return cls(**obj) if obj else cls()  # allow obj to be None => defaults
 
     def __init__(self, threshold=DEFAULT_BAD_FIT_THRESHOLD, actions=(),
@@ -1314,7 +1314,8 @@ def __init__(self, initial_model=None, gaugeopt_suite='stdgaugeopt',
         self.unreliable_ops = ('Gcnot', 'Gcphase', 'Gms', 'Gcn', 'Gcx', 'Gcz')
 
     def run(self, data, memlimit=None, comm=None, checkpoint=None, checkpoint_path=None, disable_checkpointing=False,
-            simulator: Optional[ForwardSimulator.Castable]=None):
+            simulator: Optional[ForwardSimulator.Castable]=None,
+            optimizers: Optional[Union[_opt.Optimizer, dict, list[_opt.Optimizer], list[dict]]] = None):
         """
         Run this protocol on `data`.
@@ -1351,11 +1352,19 @@ def run(self, data, memlimit=None, comm=None, checkpoint=None, checkpoint_path=N
             Ignored if None.  If not None, then we call
             fwdsim = ForwardSimulator.cast(simulator),
             and we set the .sim attribute of every Model we encounter to fwdsim.
+
+        optimizers : Optimizer, dict, list of Optimizer, or list of dict (default None)
+            The optimizer to use, or a dictionary of optimizer parameters
+            from which a default optimizer can be built.
+            If a list, its length should be either 1 or equal to the number
+            of iterations.  If length 1, the single optimizer is used for
+            every iteration; otherwise each optimizer is used for its
+            corresponding iteration.
 
         Returns
         -------
         ModelEstimateResults
         """
+        from pygsti.forwardsims.matrixforwardsim import MatrixForwardSimulator as _MatrixFSim
         tref = _time.time()
         profile = self.profile
@@ -1388,6 +1397,38 @@ def run(self, data, memlimit=None, comm=None, checkpoint=None, checkpoint_path=N
                                                  data.dataset, comm)
         if simulator is not None:
             mdl_start.sim = simulator
+
+        if optimizers is None:
+            optimizers = [self.optimizer] * len(circuit_lists)
+
+        else:
+            if isinstance(optimizers, (_opt.Optimizer, dict)):
+                optimizers = [optimizers] * len(circuit_lists)
+            if isinstance(optimizers, list):
+                if len(optimizers) == 1:
+                    optimizers = optimizers * len(circuit_lists)
+            else:
+                if not isinstance(optimizers, (list, dict)):
+                    raise ValueError(f'Invalid optimizers argument of type {type(optimizers)}; supported types are list, Optimizer, or dict.')
+            temp_optimizers = []
+            default_first_fditer = 1 if mdl_start and isinstance(mdl_start.sim, _MatrixFSim) else 0
+            for optimizer in optimizers:
+
+                if isinstance(optimizer, _opt.Optimizer):
+                    temp_optimizer = _copy.deepcopy(optimizer)  # don't mess with caller's optimizer
+                    if hasattr(optimizer, 'first_fditer') and optimizer.first_fditer is None:
+                        # special behavior: can set optimizer's first_fditer to `None` to mean "fill with default"
+                        temp_optimizer.first_fditer = default_first_fditer
+
+                else:
+                    if optimizer is None:
+                        temp_optimizer = {}
+                    else:
+                        temp_optimizer = _copy.deepcopy(optimizer)  # don't mess with caller's optimizer
+                    if 'first_fditer' not in temp_optimizer:  # then add the default first_fditer value
+                        temp_optimizer['first_fditer'] = default_first_fditer
+                temp_optimizers.append(_opt.SimplerLMOptimizer.cast(temp_optimizer))
+            optimizers = temp_optimizers
 
         if disable_checkpointing:
             seed_model = mdl_start.copy()
@@ -1447,7 +1488,7 @@ def run(self, data, memlimit=None, comm=None, checkpoint=None, checkpoint_path=N
             #Run Long-sequence GST on data
             #Use the generator based version and query each of the intermediate results.
            gst_iter_generator = _alg.iterative_gst_generator(
-                ds, seed_model, bulk_circuit_lists, self.optimizer,
+                ds, seed_model, bulk_circuit_lists, optimizers,
                 self.objfn_builders.iteration_builders, self.objfn_builders.final_builders,
                 resource_alloc, starting_idx, printer)
@@ -1501,7 +1542,7 @@ def run(self, data, memlimit=None, comm=None, checkpoint=None, checkpoint_path=N
             elif self.initial_model.target_model is not None:
                 target_model = self.initial_model.target_model.copy()
             elif self.initial_model.model is not None:
-                # when we desparately need a target model but none have been specifically given: use initial model
+                # when we desperately need a target model but none has been specifically given: use the initial model
                 target_model = self.initial_model.model.copy()
             else:
                 msg = 'Could not identify a suitable target model, this may result'\
@@ -1816,7 +1857,8 @@ def __init__(self, modes=('full TP','CPTPLND','Target'), gaugeopt_suite='stdgaug
         self.starting_point = {}  # a dict whose keys are modes
 
     def run(self, data, memlimit=None, comm=None, checkpoint=None, checkpoint_path=None,
-            disable_checkpointing=False, simulator: Optional[ForwardSimulator.Castable]=None):
+            disable_checkpointing=False, simulator: Optional[ForwardSimulator.Castable]=None,
+            optimizers: Optional[Union[_opt.Optimizer, dict, list[_opt.Optimizer], list[dict]]] = None):
         """
         Run this protocol on `data`.
@@ -1854,6 +1896,13 @@ def run(self, data, memlimit=None, comm=None, checkpoint=None, checkpoint_path=N
             fwdsim = ForwardSimulator.cast(simulator),
             and we set the .sim attribute of every Model we encounter to fwdsim.
 
+        optimizers : Optimizer, dict, list of Optimizer, or list of dict (default None)
+            The optimizer to use, or a dictionary of optimizer parameters
+            from which a default optimizer can be built.  If a list, its length
+            should be either 1 or equal to the number of iterations.  If length 1,
+            the single optimizer is used for every iteration; otherwise each
+            optimizer is used for its corresponding iteration.
+
         Returns
         -------
         ProtocolResults
@@ -1977,7 +2026,7 @@ def run(self, data, memlimit=None, comm=None, checkpoint=None, checkpoint_path=N
                     result = gst.run(data, memlimit, comm,
                                      disable_checkpointing=disable_checkpointing,
                                      checkpoint=child_checkpoint,
-                                     checkpoint_path=checkpoint_path)
+                                     checkpoint_path=checkpoint_path, optimizers=optimizers)
                     ret.add_estimates(result)
         return ret
@@ -2152,7 +2201,7 @@ def _add_badfit_estimates(results, base_estimate_label, badfit_options,
         The *primary* estimate label to base bad-fit additions off of.
 
     badfit_options : GSTBadFitOptions
-        The options specifing what constitutes a "bad fit" and what actions
+        The options specifying what constitutes a "bad fit" and what actions
         to take when one occurs.
 
     optimizer : Optimizer
@@ -2514,12 +2563,12 @@ def _compute_robust_scaling(scale_typ, objfn_cache, mdc_objfn):
     Parameters
     ----------
     scale_typ : {'robust', 'robust+', 'Robust', 'Robust+'}
-        The type of robust scaling.  Captial vs. lowercase "R" doesn't
+        The type of robust scaling.  Capital vs. lowercase "R" doesn't
         matter to this function (it indicates whether a post-scaling
         re-optimization is performed elsewhere).
The "+" postfix distinguishes a "version 1" scaling (no "+"), where we drastically scale down weights of especially bad sequences, from a "version 2" scaling ("+"), where - we additionaly rescale all the circuit data to achieve the desired chi2 + we additionally rescale all the circuit data to achieve the desired chi2 distribution of per-circuit goodness-of-fit values *without reordering* these values. @@ -2763,7 +2812,7 @@ def _evaluate_constraints(wv): # that also satisfies the constraints), and while doing this find the active constraints. printer.log("VERIFYING that the final wildcard budget vector is admissable") - # Used for deciding what counts as a negligable per-gate wildcard. + # Used for deciding what counts as a negligible per-gate wildcard. max_depth = 0 for circ in ds.keys(): if circ.depth > max_depth: @@ -2773,8 +2822,8 @@ def _evaluate_constraints(wv): for w_ind, w_ele in enumerate(wvec): active_constraints = {} strictly_smaller_wvec = wvec.copy() - negligable_budget = 1 / (100 * max_depth) - if abs(w_ele) > negligable_budget: # Use absolute values everywhere (wildcard vector can be negative). + negligible_budget = 1 / (100 * max_depth) + if abs(w_ele) > negligible_budget: # Use absolute values everywhere (wildcard vector can be negative). strictly_smaller_wvec[w_ind] = 0.99 * abs(w_ele) # Decrease the vector element by 1%. printer.log(" - Trialing strictly smaller vector, with element %.3g reduced from %.3g to %.3g" % (w_ind, w_ele, strictly_smaller_wvec[w_ind])) @@ -2802,7 +2851,7 @@ def _evaluate_constraints(wv): else: if budget_was_optimized: printer.log((" - Element %.3g is %.3g. This is below %.3g, so trialing snapping to zero" - " and updating.") % (w_ind, w_ele, negligable_budget)) + " and updating.") % (w_ind, w_ele, negligible_budget)) strictly_smaller_wvec[w_ind] = 0. glob_constraint, percircuit_constraint = _evaluate_constraints(strictly_smaller_wvec) if glob_constraint + _np.sum(percircuit_constraint) < 1e-4: @@ -2814,7 +2863,7 @@ def _evaluate_constraints(wv): else: # We do this instead when we're not optimizing the budget, as otherwise we'd change the budget. printer.log(" - Skipping trialing reducing element %.3g below %.3g, as it is less than %.3g" % - (w_ind, w_ele, negligable_budget)) + (w_ind, w_ele, negligible_budget)) active_constraints_list.append(active_constraints) budget.from_vector(wvec) @@ -2850,7 +2899,7 @@ def _reoptimize_with_weights(mdc_objfn, circuit_weights_dict, optimizer, verbosi The model to re-optimize. ds : DataSet - The data set to compare againts. + The data set to compare against. circuit_list : list The circuits for which data and predictions should be compared. diff --git a/test/unit/algorithms/test_core.py b/test/unit/algorithms/test_core.py index 737a5f7f8..df5be0f98 100644 --- a/test/unit/algorithms/test_core.py +++ b/test/unit/algorithms/test_core.py @@ -451,3 +451,74 @@ def test_iterative_gst_generator_starting_index(self): #Make sure we get the same result in both cases. 
        self.assertArraysAlmostEqual(models[-1].to_vector(), models1[-1].to_vector())
+
+    def test_iterative_gst_generator_optimizers_list(self):
+
+        #Test that passing a different optimizer per iteration works as intended
+        optimizers = []
+        tols = [1e1, 1e-8]
+        maxiters = [10, 150]
+
+        assert len(self.lsgstStrings) == len(tols), f'If you change {self.lsgstStrings=}, this unit test must be modified to account for it'
+
+        #First create substantially different optimizers
+        for i in range(len(self.lsgstStrings)):
+            optimizers.append({'tol': tols[i], 'maxiter': maxiters[i]})
+
+        generator_optimizers = core.iterative_gst_generator(
+            self.ds, self.mdl_clgst, self.lsgstStrings,
+            optimizer=optimizers,
+            iteration_objfn_builders=['chi2'],
+            final_objfn_builders=['logl'],
+            resource_alloc=None, verbosity=0
+        )
+
+        models1 = []
+        models0 = []
+        #loop over all iterations
+        for j in range(len(self.lsgstStrings)):
+
+            models0.append(next(generator_optimizers)[0])
+
+            #create a gst generator for the iteration that we are in,
+            #to be compared with the generator above
+            generator_step = core.iterative_gst_generator(
+                self.ds, self.mdl_clgst, self.lsgstStrings,
+                optimizer={'tol': tols[j], 'maxiter': maxiters[j]},
+                iteration_objfn_builders=['chi2'],
+                final_objfn_builders=['logl'],
+                resource_alloc=None, verbosity=0,
+                starting_index=j
+            )
+
+            models1.append(next(generator_step)[0])
+
+            self.assertArraysAlmostEqual(models0[-1].to_vector(), models1[-1].to_vector())
+
+    def test_iterative_gst_generator_optimizer_single_item_list(self):
+
+        #Also test that optimizer=[optimizer] is equivalent to optimizer=optimizer
+        optimizer = {'tol': 1e1, 'maxiter': 10}
+        generator_single_item_optimizers0 = core.iterative_gst_generator(
+            self.ds, self.mdl_clgst, self.lsgstStrings,
+            optimizer=[optimizer],
+            iteration_objfn_builders=['chi2'],
+            final_objfn_builders=['logl'],
+            resource_alloc=None, verbosity=0
+        )
+        generator_single_item_optimizers1 = core.iterative_gst_generator(
+            self.ds, self.mdl_clgst, self.lsgstStrings,
+            optimizer=optimizer,
+            iteration_objfn_builders=['chi2'],
+            final_objfn_builders=['logl'],
+            resource_alloc=None, verbosity=0
+        )
+
+        models0 = []
+        models1 = []
+        for j in range(len(self.lsgstStrings)):
+
+            models0.append(next(generator_single_item_optimizers0)[0])
+            models1.append(next(generator_single_item_optimizers1)[0])
+            self.assertArraysAlmostEqual(models0[-1].to_vector(), models1[-1].to_vector())
\ No newline at end of file
diff --git a/test/unit/protocols/test_gst.py b/test/unit/protocols/test_gst.py
index 2b712f868..653fe908e 100644
--- a/test/unit/protocols/test_gst.py
+++ b/test/unit/protocols/test_gst.py
@@ -13,6 +13,7 @@
 from pygsti.tools import two_delta_logl
 from ..util import BaseCase
 import pytest
+import numpy as _np
 
 class GSTUtilTester(BaseCase):
@@ -232,7 +233,7 @@ def _bulk_fill_probs_atom(self, array_to_fill, layout_atom, resource_alloc):
         super(MapForwardSimulatorWrapper, self)._bulk_fill_probs_atom(array_to_fill, layout_atom, resource_alloc)
 
-class TestGateSetTomography(BaseProtocolData):
+class GateSetTomographyTester(BaseProtocolData):
     """
     Tests for methods in the GateSetTomography class.
@@ -248,6 +249,30 @@ def test_run(self):
         twoDLogL = two_delta_logl(mdl_result, self.gst_data.dataset)
         assert twoDLogL <= 1.0  # should be near 0 for perfect data
 
+    def test_optimizer_list_run(self):
+        self.setUpClass()
+
+        optimizer = {'tol': 1e-5}
+
+        proto1 = gst.GateSetTomography(smq1Q_XYI.target_model("CPTPLND"), 'stdgaugeopt', name="testGST", optimizer=optimizer)
+        results1 = proto1.run(self.gst_data)
+        results2 = proto1.run(self.gst_data, optimizers=optimizer)
+
+        mdl_result1 = results1.estimates["testGST"].models['stdgaugeopt']
+        mdl_result2 = results2.estimates["testGST"].models['stdgaugeopt']
+
+        assert _np.allclose(mdl_result1.to_vector(), mdl_result2.to_vector())
+
+        #Test that we can pass a list of len == 1 or len == edesign.circuit_lists
+        optimizers = [optimizer] * len(self.gst_data.edesign.circuit_lists)
+        results3 = proto1.run(self.gst_data, optimizers=optimizers)
+        results4 = proto1.run(self.gst_data, optimizers=[optimizers[0]])
+        mdl_result3 = results3.estimates["testGST"].models['stdgaugeopt']
+        mdl_result4 = results4.estimates["testGST"].models['stdgaugeopt']
+        assert _np.allclose(mdl_result3.to_vector(), mdl_result1.to_vector())
+        assert _np.allclose(mdl_result4.to_vector(), mdl_result1.to_vector())
+
+
     def test_run_custom_sim(self, capfd: pytest.LogCaptureFixture):
         self.setUpClass()
         proto = gst.GateSetTomography(smq1Q_XYI.target_model("CPTPLND"), 'stdgaugeopt', name="testGST")
@@ -261,7 +286,7 @@ def test_run_custom_sim(self, capfd: pytest.LogCaptureFixture):
 
         for estimate in results.estimates.values():
             for model in estimate.models.values():
-                assert isinstance(model, MapForwardSimulatorWrapper)
+                assert isinstance(model.sim, MapForwardSimulatorWrapper)
         pass
@@ -317,7 +342,7 @@ def test_write_and_read_to_dir(self):
         assert proto_read.name == proto.name
         assert proto_read.badfit_options.actions == proto.badfit_options.actions
 
-class TestStandardGST(BaseProtocolData):
+class StandardGSTTester(BaseProtocolData):
     """
     Tests for methods in the StandardGST class.
@@ -355,8 +380,32 @@ def _test_run_custom_sim(self, mode, parent_capfd, check_output):
         assert twoDLogL <= 1.0  # should be near 0 for perfect data
         for estimate in results.estimates.values():
             for model in estimate.models.values():
-                assert isinstance(model, MapForwardSimulatorWrapper)
+                assert isinstance(model.sim, MapForwardSimulatorWrapper)
         pass
+
+    def test_optimizer_list_run(self):
+        self.setUpClass()
+
+        optimizer = {'tol': 1e-5}
+
+        proto1 = gst.StandardGST(modes=["full TP","CPTPLND","Target"])
+        results1 = proto1.run(self.gst_data)
+        results2 = proto1.run(self.gst_data, optimizers=optimizer)
+        #Test that we can pass a list of len == 1 or len == edesign.circuit_lists
+        optimizers = [optimizer] * len(self.gst_data.edesign.circuit_lists)
+        results3 = proto1.run(self.gst_data, optimizers=optimizers)
+        results4 = proto1.run(self.gst_data, optimizers=[optimizer])
+
+        for mode in ["full TP","CPTPLND","Target"]:
+            mdl_result1 = results1.estimates[mode].models['stdgaugeopt']
+            mdl_result2 = results2.estimates[mode].models['stdgaugeopt']
+            mdl_result3 = results3.estimates[mode].models['stdgaugeopt']
+            mdl_result4 = results4.estimates[mode].models['stdgaugeopt']
+
+            assert _np.allclose(mdl_result2.to_vector(), mdl_result1.to_vector())
+            assert _np.allclose(mdl_result3.to_vector(), mdl_result1.to_vector())
+            assert _np.allclose(mdl_result4.to_vector(), mdl_result1.to_vector())
+
     def test_write_and_read_to_dir(self):
         #integration test to at least confirm we are writing and reading
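
Usage sketch (editorial, not part of the patch): based on the signatures added above, the new per-iteration optimizer support can be exercised roughly as follows. The `edesign` and `dataset` names are placeholders (e.g. a `StandardGSTDesign` and a pyGSTi `DataSet`), and the tolerance/iteration values are purely illustrative.

    # A minimal sketch, assuming `edesign` and `dataset` already exist and
    # that `edesign` has three circuit lists (three GST iterations).
    from pygsti.protocols import GateSetTomography, ProtocolData
    from pygsti.modelpacks import smq1Q_XYI

    proto = GateSetTomography(smq1Q_XYI.target_model("CPTPLND"), 'stdgaugeopt', name="GST")

    # One optimizer-parameter dict per GST iteration: loose tolerances for the
    # early (short-circuit) iterations, tight tolerances for the final one.
    # The list length must be 1 or len(edesign.circuit_lists); a length-1 list
    # is broadcast to every iteration, and optimizers=None falls back to the
    # protocol's own optimizer.
    opt_schedule = [{'tol': 1e-2, 'maxiter': 25},
                    {'tol': 1e-6, 'maxiter': 100},
                    {'tol': 1e-8, 'maxiter': 200}]

    results = proto.run(ProtocolData(edesign, dataset), optimizers=opt_schedule)

This mirrors the `test_optimizer_list_run` tests above, which check that a single dict, a length-1 list, and a full-length list all yield equivalent estimates when the underlying optimizer parameters agree.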