From 10ba164d12f3119333a371e35e78223934a9be9d Mon Sep 17 00:00:00 2001
From: Riley Murray
Date: Fri, 7 Jun 2024 08:00:25 -0400
Subject: [PATCH 01/41] initial attempt at removing unneeded commented-out function definitions. Likely over-zealous.

---
 pygsti/algorithms/core.py | 2 -
 pygsti/algorithms/fiducialselection.py | 7 -
 pygsti/algorithms/germselection.py | 73 --
 pygsti/baseobjs/errorgenbasis.py | 10 -
 pygsti/baseobjs/label.py | 1 -
 pygsti/baseobjs/polynomial.py | 15 -
 pygsti/circuits/circuit.py | 2 -
 pygsti/circuits/circuitconstruction.py | 4 -
 pygsti/drivers/bootstrap.py | 49 --
 pygsti/evotypes/chp/opreps.py | 61 --
 pygsti/evotypes/chp/statereps.py | 10 -
 pygsti/evotypes/densitymx/opreps.pyx | 18 -
 pygsti/evotypes/densitymx_slow/effectreps.py | 21 -
 pygsti/evotypes/densitymx_slow/opreps.py | 20 -
 pygsti/evotypes/stabilizer/effectreps.pyx | 4 -
 pygsti/evotypes/stabilizer/opreps.pyx | 4 -
 pygsti/evotypes/stabilizer/statereps.pyx | 4 -
 pygsti/evotypes/stabilizer/termreps.pyx | 7 -
 pygsti/evotypes/stabilizer_slow/effectreps.py | 22 -
 pygsti/evotypes/stabilizer_slow/opreps.py | 4 -
 pygsti/evotypes/stabilizer_slow/statereps.py | 4 -
 pygsti/evotypes/statevec/termreps.pyx | 7 -
 pygsti/evotypes/statevec_slow/effectreps.py | 3 -
 pygsti/extras/interpygate/core.py | 36 -
 pygsti/extras/rb/benchmarker.py | 8 -
 pygsti/extras/rb/io.py | 2 -
 pygsti/extras/rpe/rpeconstruction.py | 2 -
 pygsti/forwardsims/forwardsim.py | 51 --
 pygsti/forwardsims/mapforwardsim.py | 7 -
 pygsti/forwardsims/matrixforwardsim.py | 7 -
 pygsti/forwardsims/termforwardsim.py | 109 ---
 .../termforwardsim_calc_statevec.pyx | 651 ------------------
 pygsti/io/mongodb.py | 7 -
 pygsti/layouts/copalayout.py | 4 -
 pygsti/layouts/distlayout.py | 1 -
 pygsti/modelmembers/modelmember.py | 17 -
 pygsti/modelmembers/operations/composedop.py | 23 -
 pygsti/modelmembers/operations/embeddedop.py | 11 -
 .../operations/lindbladcoefficients.py | 1 -
 .../operations/lindbladerrorgen.py | 79 ---
 pygsti/modelmembers/operations/linearop.py | 22 -
 pygsti/modelmembers/operations/repeatedop.py | 23 -
 pygsti/modelmembers/povms/basepovm.py | 34 -
 pygsti/modelmembers/povms/marginalizedpovm.py | 79 ---
 pygsti/modelmembers/states/fullpurestate.py | 24 -
 pygsti/modelmembers/term.py | 13 -
 pygsti/models/explicitmodel.py | 10 -
 pygsti/models/memberdict.py | 12 -
 pygsti/objectivefns/objectivefns.py | 97 ---
 pygsti/objectivefns/wildcardbudget.py | 5 -
 pygsti/optimize/customcg.py | 12 -
 pygsti/optimize/customlm.py | 357 ++++------
 pygsti/optimize/optimize.py | 84 ---
 pygsti/optimize/wildcardopt.py | 20 -
 pygsti/protocols/estimate.py | 1 -
 pygsti/protocols/gst.py | 5 -
 pygsti/protocols/modeltest.py | 6 -
 pygsti/protocols/protocol.py | 9 -
 pygsti/report/factory.py | 15 -
 pygsti/report/fogidiagram.py | 16 -
 pygsti/report/reportables.py | 14 -
 pygsti/report/workspace.py | 16 +-
 pygsti/report/workspaceplots.py | 13 -
 pygsti/tools/basistools.py | 31 -
 pygsti/tools/fastcalc.pyx | 58 --
 pygsti/tools/fogitools.py | 219 ------
 pygsti/tools/listtools.py | 114 ---
 pygsti/tools/matrixtools.py | 26 -
 pygsti/tools/rbtheory.py | 69 --
 test/test_packages/iotest/test_codecs.py | 21 -
 test/unit/modelmembers/test_operation.py | 6 -
 test/unit/objects/test_evaltree.py | 58 --
 test/unit/objects/test_model.py | 10 -
 test/unit/objects/test_prefixtable.py | 62 --
 test/unit/tools/test_likelihoodfns.py | 5 -
 75 files changed, 124 insertions(+), 2810 deletions(-)
 delete mode 100644 test/unit/objects/test_evaltree.py
 delete mode 100644 test/unit/objects/test_prefixtable.py

diff --git
a/pygsti/algorithms/core.py b/pygsti/algorithms/core.py index dd0a21ef7..61c3a2185 100644 --- a/pygsti/algorithms/core.py +++ b/pygsti/algorithms/core.py @@ -1150,8 +1150,6 @@ def find_closest_unitary_opmx(operation_mx): # d = _np.sqrt(operation_mx.shape[0]) # I = _np.identity(d) - #def getu_1q(basisVec): # 1 qubit version - # return _spl.expm( 1j * (basisVec[0]*_tools.sigmax + basisVec[1]*_tools.sigmay + basisVec[2]*_tools.sigmaz) ) def _get_gate_mx_1q(basis_vec): # 1 qubit version return _tools.single_qubit_gate(basis_vec[0], basis_vec[1], diff --git a/pygsti/algorithms/fiducialselection.py b/pygsti/algorithms/fiducialselection.py index 536db2847..6f75275ed 100644 --- a/pygsti/algorithms/fiducialselection.py +++ b/pygsti/algorithms/fiducialselection.py @@ -409,13 +409,6 @@ def final_result_test(final_fids, verb_printer): return prepFidList, measFidList -#def bool_list_to_ind_list(boolList): -# output = _np.array([]) -# for i, boolVal in boolList: -# if boolVal == 1: -# output = _np.append(i) -# return output - def xor(*args): """ Implements logical xor function for arbitrary number of inputs. diff --git a/pygsti/algorithms/germselection.py b/pygsti/algorithms/germselection.py index 48957b90e..6588a5879 100644 --- a/pygsti/algorithms/germselection.py +++ b/pygsti/algorithms/germselection.py @@ -3295,79 +3295,6 @@ def symmetric_low_rank_spectrum_update(update, orig_e, U, proj_U, force_rank_inc #return the new eigenvalues return new_evals, True -#Note: This function won't work for our purposes because of the assumptions -#about the rank of the update on the nullspace of the matrix we're updating, -#but keeping this here commented for future reference. -#Function for doing fast calculation of the updated inverse trace: -#def riedel_style_inverse_trace(update, orig_e, U, proj_U, force_rank_increase=True): -# """ -# input: -# -# update : ndarray -# symmetric low-rank update to perform. -# This is the first half the symmetric rank decomposition s.t. -# update@update.T= the full update matrix. -# -# orig_e : ndarray -# Spectrum of the original matrix. This is a 1-D array. -# -# proj_U : ndarray -# Projector onto the complement of the column space of the -# original matrix's eigenvectors. -# -# output: -# -# trace : float -# Value of the trace of the updated psuedoinverse matrix. -# -# updated_rank : int -# total rank of the updated matrix. -# -# rank_increase_flag : bool -# a flag that is returned to indicate is a candidate germ failed to amplify additional parameters. -# This indicates things short circuited and so the scoring function should skip this germ. -# """ -# -# #First we need to for the matrix P, whose column space -# #forms an orthonormal basis for the component of update -# #that is in the complement of U. -# -# proj_update= proj_U@update -# -# #Next take the RRQR decomposition of this matrix: -# q_update, r_update, _ = _sla.qr(proj_update, mode='economic', pivoting=True) -# -# #Construct P by taking the columns of q_update corresponding to non-zero values of r_A on the diagonal. -# nonzero_indices_update= _np.nonzero(_np.diag(r_update)>1e-10) #HARDCODED (threshold is hardcoded) -# -# #if the rank doesn't increase then we can't use the Riedel approach. -# #Abort early and return a flag to indicate the rank did not increase. 
-# if len(nonzero_indices_update[0])==0 and force_rank_increase: -# return None, None, False -# -# P= q_update[: , nonzero_indices_update[0]] -# -# updated_rank= len(orig_e)+ len(nonzero_indices_update[0]) -# -# #Now form the matrix R_update which is given by P.T @ proj_update. -# R_update= P.T@proj_update -# -# #R_update gets concatenated with U.T@update to form -# #a block column matrixblock_column= np.concatenate([U.T@update, R_update], axis=0) -# -# Uta= U.T@update -# -# try: -# RRRDinv= R_update@_np.linalg.inv(R_update.T@R_update) -# except _np.linalg.LinAlgError as err: -# print('Numpy thinks this matrix is singular, condition number is: ', _np.linalg.cond(R_update.T@R_update)) -# print((R_update.T@R_update).shape) -# raise err -# pinv_orig_e_mat= _np.diag(1/orig_e) -# -# trace= _np.sum(1/orig_e) + _np.trace( RRRDinv@(_np.eye(Uta.shape[1]) + Uta.T@pinv_orig_e_mat@Uta)@RRRDinv.T ) -# -# return trace, updated_rank, True def minamide_style_inverse_trace(update, orig_e, U, proj_U, force_rank_increase=False): """ diff --git a/pygsti/baseobjs/errorgenbasis.py b/pygsti/baseobjs/errorgenbasis.py index 8f254198e..97975ca1a 100644 --- a/pygsti/baseobjs/errorgenbasis.py +++ b/pygsti/baseobjs/errorgenbasis.py @@ -79,11 +79,6 @@ def label_index(self, label, ok_if_missing=False): return None return self._label_indices[label] - #@property - #def sslbls(self): - # """ The support of this errorgen space, e.g., the qubits where its elements may be nontrivial """ - # return self.sslbls - def create_subbasis(self, must_overlap_with_these_sslbls): """ Create a sub-basis of this basis by including only the elements @@ -491,11 +486,6 @@ def label_index(self, elemgen_label, ok_if_missing=False): return base + indices[elemgen_label] - #@property - #def sslbls(self): - # """ The support of this errorgen space, e.g., the qubits where its elements may be nontrivial """ - # return self.sslbls - def create_subbasis(self, must_overlap_with_these_sslbls, retain_max_weights=True): """ Create a sub-basis of this basis by including only the elements diff --git a/pygsti/baseobjs/label.py b/pygsti/baseobjs/label.py index 172d63274..66b5aec0e 100644 --- a/pygsti/baseobjs/label.py +++ b/pygsti/baseobjs/label.py @@ -360,7 +360,6 @@ def map_state_space_labels(self, mapper): mapped_sslbls = [mapper(sslbl) for sslbl in self.sslbls] return Label(self.name, mapped_sslbls) - def __str__(self): """ Defines how a Label is printed out, e.g. Gx:0 or Gcnot:1:2 diff --git a/pygsti/baseobjs/polynomial.py b/pygsti/baseobjs/polynomial.py index eed6e88c0..0045cabe5 100644 --- a/pygsti/baseobjs/polynomial.py +++ b/pygsti/baseobjs/polynomial.py @@ -511,21 +511,6 @@ def __mul__(self, x): def __rmul__(self, x): return self.__mul__(x) - #Punt for now?? 
- #def __imul__(self, x): - # if isinstance(x, Polynomial): - # newcoeffs = {} - # for k1, v1 in self.items(): - # for k2, v2 in x.items(): - # k = tuple(sorted(k1 + k2)) - # if k in newcoeffs: newcoeffs[k] += v1 * v2 - # else: newcoeffs[k] = v1 * v2 - # self.clear() - # self.update(newcoeffs) - # else: - # self.scale(x) - # return self - def __pow__(self, n): ret = FASTPolynomial({(): 1.0}, self.max_num_vars) # max_order updated by mults below cur = self diff --git a/pygsti/circuits/circuit.py b/pygsti/circuits/circuit.py index 2ccb7aaae..822fd0b65 100644 --- a/pygsti/circuits/circuit.py +++ b/pygsti/circuits/circuit.py @@ -2650,7 +2650,6 @@ def replace_layers_with_aliases(self, alias_dict): layers = layers[:i] + c._labels + layers[i + 1:] return Circuit._fastinit(layers, self._line_labels, editable=False, occurrence=self._occurrence_id) - def change_gate_library(self, compilation, allowed_filter=None, allow_unchanged_gates=False, depth_compression=True, one_q_gate_relations=None): """ @@ -3538,7 +3537,6 @@ def cnt(obj): # obj is either a simple label or a list return sum([cnt(sub) for sub in obj]) return sum([cnt(layer_lbl) for layer_lbl in self._labels]) - def _togrid(self, identity_name): """ return a list-of-lists rep? """ diff --git a/pygsti/circuits/circuitconstruction.py b/pygsti/circuits/circuitconstruction.py index 35b8acd39..d6bc979c1 100644 --- a/pygsti/circuits/circuitconstruction.py +++ b/pygsti/circuits/circuitconstruction.py @@ -183,10 +183,6 @@ def repeat_with_max_length(x, max_length, assert_at_least_one_rep=False): """ return repeat(x, repeat_count_with_max_length(x, max_length, assert_at_least_one_rep), assert_at_least_one_rep) -#Useful for anything? -#def repeat_empty(x,max_length,assert_at_least_one_rep=False): -# return () - def repeat_and_truncate(x, n, assert_at_least_one_rep=False): """ diff --git a/pygsti/drivers/bootstrap.py b/pygsti/drivers/bootstrap.py index 8a28881a4..cc469a7e4 100644 --- a/pygsti/drivers/bootstrap.py +++ b/pygsti/drivers/bootstrap.py @@ -492,52 +492,3 @@ def _to_rms_model(gs_list, target_gs): output_gs = target_gs.copy() output_gs.from_vector(_np.mean(gsVecArray)) return output_gs - -#Unused? 
-#def gateset_jtracedist(mdl,target_model,mx_basis="gm"): -# output = _np.zeros(3,dtype=float) -# for i, gate in enumerate(target_model.operations.keys()): -# output[i] = _tools.jtracedist(mdl.operations[gate],target_model.operations[gate],mx_basis=mx_basis) -## print output -# return output -# -#def gateset_entanglement_fidelity(mdl,target_model): -# output = _np.zeros(3,dtype=float) -# for i, gate in enumerate(target_model.operations.keys()): -# output[i] = _tools.entanglement_fidelity(mdl.operations[gate],target_model.operations[gate]) -# return output -# -#def gateset_decomp_angle(mdl): -# output = _np.zeros(3,dtype=float) -# for i, gate in enumerate(mdl.operations.keys()): -# output[i] = _tools.decompose_gate_matrix(mdl.operations[gate]).get('pi rotations',0) -# return output -# -#def gateset_decomp_decay_diag(mdl): -# output = _np.zeros(3,dtype=float) -# for i, gate in enumerate(mdl.operations.keys()): -# output[i] = _tools.decompose_gate_matrix(mdl.operations[gate]).get('decay of diagonal rotation terms',0) -# return output -# -#def gateset_decomp_decay_offdiag(mdl): -# output = _np.zeros(3,dtype=float) -# for i, gate in enumerate(mdl.operations.keys()): -# output[i] = _tools.decompose_gate_matrix(mdl.operations[gate]).get('decay of off diagonal rotation terms',0) -# return output -# -##def gateset_fidelity(mdl,target_model,mx_basis="gm"): -## output = _np.zeros(3,dtype=float) -## for i, gate in enumerate(target_model.operations.keys()): -## output[i] = _tools.fidelity(mdl.operations[gate],target_model.operations[gate]) -## return output -# -#def gateset_diamonddist(mdl,target_model,mx_basis="gm"): -# output = _np.zeros(3,dtype=float) -# for i, gate in enumerate(target_model.operations.keys()): -# output[i] = _tools.diamonddist(mdl.operations[gate],target_model.operations[gate],mx_basis=mx_basis) -# return output -# -#def spamrameter(mdl): -# firstRho = list(mdl.preps.keys())[0] -# firstE = list(mdl.effects.keys())[0] -# return _np.dot(mdl.preps[firstRho].T,mdl.effects[firstE])[0,0] diff --git a/pygsti/evotypes/chp/opreps.py b/pygsti/evotypes/chp/opreps.py index 4c992d224..94b6279ce 100644 --- a/pygsti/evotypes/chp/opreps.py +++ b/pygsti/evotypes/chp/opreps.py @@ -49,11 +49,6 @@ def adjoint_acton_random(self, state, rand_state): def _chp_ops(self, seed_or_state=None): return self.base_chp_ops - #def chp_str(self, seed_or_state=None): - # op_str = '\n'.join(self.chp_ops(seed_or_state=seed_or_state)) - # if len(op_str) > 0: op_str += '\n' - # return op_str - def to_dense(self, on_space): try: str_ops = str(self._chp_ops()) @@ -217,63 +212,7 @@ def __init__(self, stochastic_basis, basis, initial_rates, seed_or_state, state_ super(OpRepStochastic, self).__init__(basis, _np.array(rates, 'd'), reps, seed_or_state, state_space) - #OLD - #self.basis = basis - #assert (basis.name == 'pp'), "Only Pauli basis is allowed for 'chp' evotype" - # - #if isinstance(seed_or_state, _RandomState): - # self.rand_state = seed_or_state - #else: - # self.rand_state = _RandomState(seed_or_state) - # - ##TODO: need to fix this: `basis` above functions as basis to make superoperators out of, but here we have - ## a CHP stochastic op which is given a basis for the space - e.g. a dim=2 vector space for 1 qubit, so - ## we need to distinguish/specify the basis better for this... 
and what about rate_poly_dicts (see svterm) - #nqubits = state_space.num_qubits - #assert(self.basis.dim == 4**nqubits), "Must have an integral number of qubits" - # - #std_chp_ops = _itgs.standard_gatenames_chp_conversions() - # - ## For CHP, need to make a Composed + EmbeddedOp for the super operators - ## For lower overhead, make this directly using the rep instead of with objects - #self.stochastic_superop_reps = [] - #for label in self.basis.labels[1:]: - # combined_chp_ops = [] - # - # for i, pauli in enumerate(label): - # name = 'Gi' if pauli == "I" else 'G%spi' % pauli.lower() - # chp_op = std_chp_ops[name] - # chp_op_targeted = [op.replace('0', str(i)) for op in chp_op] - # combined_chp_ops.extend(chp_op_targeted) - # - # sub_rep = OpRep(combined_chp_ops, state_space) - # self.stochastic_superop_reps.append(sub_rep) - #self.rates = initial_rates - #super(OpRepStochastic, self).__init__([], state_space) # don't store any chp_ops in base - def update_rates(self, rates): unitary_rates = [1 - sum(rates)] + list(rates) self.rates[:] = rates self.update_unitary_rates(unitary_rates) - - #TODO REMOVE - covered by OpRepRandomUnitary - #def chp_ops(self, seed_or_state=None): - # # Optionally override RNG for this call - # if seed_or_state is not None: - # if isinstance(seed_or_state, _np.random.RandomState): - # rand_state = seed_or_state - # else: - # rand_state = _np.random.RandomState(seed_or_state) - # else: - # rand_state = self.rand_state - # - # rates = self.rates - # all_rates = [*rates, 1.0 - sum(rates)] # Include identity so that probabilities are 1 - # index = rand_state.choice(self.basis.size, p=all_rates) - # - # # If final entry, no operation selected - # if index == self.basis.size - 1: - # return [] - # - # rep = self.stochastic_superop_reps[index] - # return rep._chp_ops() diff --git a/pygsti/evotypes/chp/statereps.py b/pygsti/evotypes/chp/statereps.py index b17785266..749f0ab06 100644 --- a/pygsti/evotypes/chp/statereps.py +++ b/pygsti/evotypes/chp/statereps.py @@ -40,16 +40,6 @@ def __init__(self, chp_ops, state_space): def num_qubits(self): return self.state_space.num_qubits - #REMOVE - #def chp_ops(self, seed_or_state=None): - # return self.base_chp_ops - - #REMOVE - #def chp_str(self, seed_or_state=None): - # op_str = '\n'.join(self.chp_ops(seed_or_state=seed_or_state)) - # if len(op_str) > 0: op_str += '\n' - # return op_str - def copy(self): return StateRep(self.chp_ops, self.state_space) diff --git a/pygsti/evotypes/densitymx/opreps.pyx b/pygsti/evotypes/densitymx/opreps.pyx index d3c05586a..aa1616146 100644 --- a/pygsti/evotypes/densitymx/opreps.pyx +++ b/pygsti/evotypes/densitymx/opreps.pyx @@ -727,24 +727,6 @@ cdef class OpRepExpErrorgen(OpRep): return _copy.deepcopy(self) # I think this should work using reduce/setstate framework TODO - test and maybe put in base class? -#TODO: can add this after creating OpCRep_IdentityPlusErrorgen if it seems useful -#cdef class OpRepIdentityPlusErrorgen(OpRep): -# cdef public object errorgen_rep -# -# def __init__(self, errorgen_rep): -# self.errorgen_rep = errorgen_rep -# assert(self.c_rep == NULL) -# self.c_rep = new OpCRep_IdentityPlusErrorgen((errorgen_rep).c_rep) -# self.state_space = errorgen_rep.state_space -# -# def __reduce__(self): -# return (OpRepIdentityPlusErrorgen, (self.errorgen_rep,)) -# -# #Needed? 
-# #def errgenrep_has_changed(self, onenorm_upperbound): -# # pass - - cdef class OpRepRepeated(OpRep): cdef public OpRep repeated_rep cdef public INT num_repetitions diff --git a/pygsti/evotypes/densitymx_slow/effectreps.py b/pygsti/evotypes/densitymx_slow/effectreps.py index 39a50a6be..b013978b8 100644 --- a/pygsti/evotypes/densitymx_slow/effectreps.py +++ b/pygsti/evotypes/densitymx_slow/effectreps.py @@ -105,11 +105,6 @@ def __init__(self, povm_factors, effect_labels, state_space): super(EffectRepTensorProduct, self).__init__(state_space) self.factor_effects_have_changed() - #TODO: fix this: - #def __reduce__(self): - # return (EffectRepTensorProduct, - # (self.kron_array, self.factor_dims, self.nfactors, self.max_factor_dim, self.dim)) - def to_dense(self, on_space, outvec=None): if on_space not in ('minimal', 'HilbertSchmidt'): @@ -164,22 +159,6 @@ def _fill_fast_kron(self): def factor_effects_have_changed(self): self._fill_fast_kron() # updates effect reps - #def to_dense(self): - # if len(self.factors) == 0: return _np.empty(0, complex if self._evotype == "statevec" else 'd') - # #NOTE: moved a fast version of to_dense to replib - could use that if need a fast to_dense call... - # - # factorPOVMs = self.factors - # ret = factorPOVMs[0][self.effectLbls[0]].to_dense() - # for i in range(1, len(factorPOVMs)): - # ret = _np.kron(ret, factorPOVMs[i][self.effectLbls[i]].to_dense()) - # return ret - # elif self._evotype == "stabilizer": - # # each factor is a StabilizerEffectVec - # raise ValueError("Cannot convert Stabilizer tensor product effect to an array!") - # # should be using effect.outcomes property... - # else: # self._evotype in ("svterm","cterm") - # raise NotImplementedError("to_dense() not implemented for %s evolution type" % self._evotype) - class EffectRepComposed(EffectRep): def __init__(self, op_rep, effect_rep, op_id, state_space): diff --git a/pygsti/evotypes/densitymx_slow/opreps.py b/pygsti/evotypes/densitymx_slow/opreps.py index 74a52c34d..ab63e3ce0 100644 --- a/pygsti/evotypes/densitymx_slow/opreps.py +++ b/pygsti/evotypes/densitymx_slow/opreps.py @@ -270,10 +270,6 @@ def update_rates(self, rates): self.rates[:] = rates self.update_unitary_rates(unitary_rates) -#class OpRepClifford(OpRep): # TODO? 
-# #def __init__(self, unitarymx, symplecticrep): -# # pass - class OpRepComposed(OpRep): @@ -375,13 +371,6 @@ def __init__(self, state_space, target_labels, embedded_rep): self.offset = sum(blocksizes[0:self.active_block_index]) super(OpRepEmbedded, self).__init__(state_space) - #def __reduce__(self): - # return (DMOpRepEmbedded, (self.embedded, - # self.num_basis_els, self.action_inds, - # self.blocksizes, self.embeddedDim, - # self.ncomponents, self.active_block_index, - # self.nblocks, self.dim)) - def _acton_other_blocks_trivially(self, output_state, state): offset = 0 for iBlk, blockSize in enumerate(self.blocksizes): @@ -466,15 +455,6 @@ def set_exp_params(self, mu, eta, m_star, s): def exp_params(self): return (self.mu, self.eta, self.m_star, self.s) - #def __reduce__(self): - # if self.unitary_postfactor is None: - # return (DMOpRepLindblad, (self.errorgen_rep, self.mu, self.eta, self.m_star, self.s, - # _np.empty(0, 'd'), _np.empty(0, _np.int64), _np.zeros(1, _np.int64))) - # else: - # return (DMOpRepLindblad, (self.errorgen_rep, self.mu, self.eta, self.m_star, self.s, - # self.unitary_postfactor.data, self.unitary_postfactor.indices, - # self.unitary_postfactor.indptr)) - def acton(self, state): """ Act this gate map on an input state """ statedata = state.data.copy() # must COPY because _custom... call below *modifies* "b" arg diff --git a/pygsti/evotypes/stabilizer/effectreps.pyx b/pygsti/evotypes/stabilizer/effectreps.pyx index 5efc6d8c8..9bc68e028 100644 --- a/pygsti/evotypes/stabilizer/effectreps.pyx +++ b/pygsti/evotypes/stabilizer/effectreps.pyx @@ -39,10 +39,6 @@ cdef class EffectRep(_basereps_cython.EffectRep): def nqubits(self): return self.state_space.num_qubits - #@property - #def dim(self): - # return 2**(self.c_effect._n) # assume "unitary evolution"-type mode - def probability(self, StateRep state not None): #unnecessary (just put in signature): cdef StateRep st = state return self.c_effect.probability(state.c_state) diff --git a/pygsti/evotypes/stabilizer/opreps.pyx b/pygsti/evotypes/stabilizer/opreps.pyx index c8be47755..3ed5f0ee8 100644 --- a/pygsti/evotypes/stabilizer/opreps.pyx +++ b/pygsti/evotypes/stabilizer/opreps.pyx @@ -40,10 +40,6 @@ cdef class OpRep(_basereps_cython.OpRep): def nqubits(self): return self.state_space.num_qubits - #@property - #def dim(self): - # return 2**(self.nqubits) # assume "unitary evolution"-type mode - def acton(self, StateRep state not None): cdef INT n = self.c_rep._n cdef INT namps = state.c_state._namps diff --git a/pygsti/evotypes/stabilizer/statereps.pyx b/pygsti/evotypes/stabilizer/statereps.pyx index 13cb6b0c9..782f10558 100644 --- a/pygsti/evotypes/stabilizer/statereps.pyx +++ b/pygsti/evotypes/stabilizer/statereps.pyx @@ -49,10 +49,6 @@ cdef class StateRep(_basereps_cython.StateRep): def nqubits(self): return self.state_space.num_qubits - #@property - #def dim(self): - # return 2**(self.c_state._n) # assume "unitary evolution"-type mode - def actionable_staterep(self): # return a state rep that can be acted on by op reps or mapped to # a probability/amplitude by POVM effect reps. 
diff --git a/pygsti/evotypes/stabilizer/termreps.pyx b/pygsti/evotypes/stabilizer/termreps.pyx index dc83b3002..728673a32 100644 --- a/pygsti/evotypes/stabilizer/termreps.pyx +++ b/pygsti/evotypes/stabilizer/termreps.pyx @@ -107,10 +107,3 @@ cdef class TermRep(_basereps_cython.TermRep): return TermRep(self.coeff.copy(), self.magnitude, self.logmagnitude, self.pre_state, self.post_state, self.pre_effect, self.post_effect, self.pre_ops, self.post_ops) - - #Not needed - and this implementation is quite right as it will need to change - # the ordering of the pre/post ops also. - #def conjugate(self): - # return TermRep(self.coeff.copy(), self.magnitude, self.logmagnitude, - # self.post_state, self.pre_state, self.post_effect, self.pre_effect, - # self.post_ops, self.pre_ops) diff --git a/pygsti/evotypes/stabilizer_slow/effectreps.py b/pygsti/evotypes/stabilizer_slow/effectreps.py index 5bf2dfe33..6e5d8bb21 100644 --- a/pygsti/evotypes/stabilizer_slow/effectreps.py +++ b/pygsti/evotypes/stabilizer_slow/effectreps.py @@ -23,10 +23,6 @@ def __init__(self, state_space): def nqubits(self): return self.state_space.num_qubits - #@property - #def dim(self): - # return 2**self.nqubits # assume "unitary evolution"-type mode - def probability(self, state): return state.sframe.measurement_probability(self.zvals, check=True) # use check for now? @@ -37,10 +33,6 @@ def to_dense(self, on_space): return _mt.zvals_to_dense(self.zvals, superket=bool(on_space not in ('minimal', 'Hilbert'))) -#class EffectRepConjugatedState(EffectRep): -# pass # TODO - this should be possible - - class EffectRepComputational(EffectRep): def __init__(self, zvals, basis, state_space): @@ -49,17 +41,6 @@ def __init__(self, zvals, basis, state_space): assert(self.state_space.num_qubits == len(self.zvals)) super(EffectRepComputational, self).__init__(state_space) - #@property - #def outcomes(self): - # """ - # The 0/1 outcomes identifying this effect within its StabilizerZPOVM - # - # Returns - # ------- - # numpy.ndarray - # """ - # return self.zvals - def __str__(self): nQubits = len(self.zvals) s = "Stabilizer effect vector for %d qubits with outcome %s" % (nQubits, str(self.zvals)) @@ -80,9 +61,6 @@ def __init__(self, op_rep, effect_rep, op_id, state_space): super(EffectRepComposed, self).__init__(state_space) - #def __reduce__(self): - # return (EffectRepComposed, (self.op_rep, self.effect_rep, self.op_id, self.state_space)) - def probability(self, state): state = self.op_rep.acton(state) # *not* acton_adjoint return self.effect_rep.probability(state) diff --git a/pygsti/evotypes/stabilizer_slow/opreps.py b/pygsti/evotypes/stabilizer_slow/opreps.py index 313e6aad8..ae3b2bea4 100644 --- a/pygsti/evotypes/stabilizer_slow/opreps.py +++ b/pygsti/evotypes/stabilizer_slow/opreps.py @@ -34,10 +34,6 @@ def adjoint_acton(self, state): def nqubits(self): return self.state_space.num_qubits - #@property - #def dim(self): - # return 2**(self.nqubits) # assume "unitary evolution"-type mode - class OpRepClifford(OpRep): def __init__(self, unitarymx, symplecticrep, basis, state_space): diff --git a/pygsti/evotypes/stabilizer_slow/statereps.py b/pygsti/evotypes/stabilizer_slow/statereps.py index b47417f08..bb71b74e0 100644 --- a/pygsti/evotypes/stabilizer_slow/statereps.py +++ b/pygsti/evotypes/stabilizer_slow/statereps.py @@ -42,10 +42,6 @@ def amps(self): def nqubits(self): return self.sframe.n - #@property - #def dim(self): - # return 2**self.nqubits # assume "unitary evolution"-type mode - def actionable_staterep(self): # return a state rep 
that can be acted on by op reps or mapped to # a probability/amplitude by POVM effect reps. diff --git a/pygsti/evotypes/statevec/termreps.pyx b/pygsti/evotypes/statevec/termreps.pyx index 07ba8f61a..dc73ae1d0 100644 --- a/pygsti/evotypes/statevec/termreps.pyx +++ b/pygsti/evotypes/statevec/termreps.pyx @@ -112,13 +112,6 @@ cdef class TermRep(_basereps_cython.TermRep): self.pre_state, self.post_state, self.pre_effect, self.post_effect, self.pre_ops, self.post_ops) - #Not needed - and this implementation is quite right as it will need to change - # the ordering of the pre/post ops also. - #def conjugate(self): - # return TermRep(self.coeff.copy(), self.magnitude, self.logmagnitude, - # self.post_state, self.pre_state, self.post_effect, self.pre_effect, - # self.post_ops, self.pre_ops) - #Note: to use direct term reps (numerical coeffs) we'll need to update # what the members are called and add methods as was done for TermRep. diff --git a/pygsti/evotypes/statevec_slow/effectreps.py b/pygsti/evotypes/statevec_slow/effectreps.py index d1a14ebc6..2863233e5 100644 --- a/pygsti/evotypes/statevec_slow/effectreps.py +++ b/pygsti/evotypes/statevec_slow/effectreps.py @@ -180,9 +180,6 @@ def __init__(self, op_rep, effect_rep, op_id, state_space): super(EffectRepComposed, self).__init__(effect_rep.state_space) - #def __reduce__(self): - # return (EffectRepComposed, (self.op_rep, self.effect_rep, self.op_id, self.state_space)) - def probability(self, state): state = self.op_rep.acton(state) # *not* acton_adjoint return self.effect_rep.probability(state) diff --git a/pygsti/extras/interpygate/core.py b/pygsti/extras/interpygate/core.py index d7b205146..5f25ab20e 100644 --- a/pygsti/extras/interpygate/core.py +++ b/pygsti/extras/interpygate/core.py @@ -241,15 +241,6 @@ def create_object(self, args=None, sslbls=None): return InterpolatedDenseOp(target_op, self.base_interpolator, self.aux_interpolator, self.to_vector(), _np.array(args), self._argument_indices) - #def write(self, dirname): - # dirname = _pathlib.Path(dirname) - # with open(str(dirname / "targetop.pkl"), 'wb') as f: - # _pickle.dump(self.target_op, f) - # _np.save(dirname / "paramvec.np", self._paramvec_with_time) - # self.base_interpolator.write(dirname / "base.interp") - # if self.aux_interpolator is not None: - # self.aux_interptolator.write(dirname / "aux.interp") - @property def num_params(self): return len(self._paramvec) @@ -267,24 +258,6 @@ def from_vector(self, v, close=False, dirty_value=True): class InterpolatedDenseOp(_DenseOperator): - #@classmethod - #def from_dir(cls, dirname): - # dirname = _pathlib.Path(dirname) - # with open(str(dirname / "targetop.pkl"), 'rb') as f: - # target_op = _pickle.load(f) - # pt = _np.load(dirname / "paramvec.np") - # base_interp = InterpolatedQuantity.from_file(dirname / "base.interp") - # aux_interp = InterpolatedQuantity.from_file(dirname / "aux.interp") \ - # if (dirname / "aux.interp").exists() else None - # - # if base_interp.times is not None: - # tm = pt[-1] - # pt = pt[0:-1] - # else: - # tm = None - # - # return cls(target_op, base_interp, aux_interp, pt, tm) - @classmethod def create_by_interpolating_physical_process(cls, target_op, physical_process, parameter_ranges=None, parameter_points=None, comm=None, @@ -391,15 +364,6 @@ def __init__(self, target_op, base_interpolator, aux_interpolator=None, initial_ # initialize object self.from_vector(self._paramvec) - #def write(self, dirname): - # dirname = _pathlib.Path(dirname) - # with open(str(dirname / "targetop.pkl"), 'wb') as f: - # 
_pickle.dump(self.target_op, f) - # _np.save(dirname / "paramvec.np", self._paramvec_with_time) - # self.base_interpolator.write(dirname / "base.interp") - # if self.aux_interpolator is not None: - # self.aux_interptolator.write(dirname / "aux.interp") - @property def num_params(self): return len(self._paramvec) diff --git a/pygsti/extras/rb/benchmarker.py b/pygsti/extras/rb/benchmarker.py index 03018b341..1b9b31e5d 100644 --- a/pygsti/extras/rb/benchmarker.py +++ b/pygsti/extras/rb/benchmarker.py @@ -506,10 +506,6 @@ def generate_success_or_fail_dataset(self, overwrite=False): self.multids['success-fail'] = sfmultids - # def get_all_data(self): - - # for circ - def summary_data(self, datatype, specindex, qubits=None): spec = self._specs[specindex] @@ -522,10 +518,6 @@ def summary_data(self, datatype, specindex, qubits=None): return self.pass_summary_data[specindex][qubits][datatype] - #def getauxillary_data(self, datatype, specindex, qubits=None): - - #def get_predicted_summary_data(self, prediction, datatype, specindex, qubits=None): - def create_summary_data(self, predictions=None, verbosity=2, auxtypes=None): """ todo diff --git a/pygsti/extras/rb/io.py b/pygsti/extras/rb/io.py index d1452494c..a21b306f2 100644 --- a/pygsti/extras/rb/io.py +++ b/pygsti/extras/rb/io.py @@ -22,8 +22,6 @@ from pygsti.data import multidataset as _mds -#def load_benchmarking_data(basedir): - def load_benchmarker(directory, load_datasets=True, verbosity=1): """ diff --git a/pygsti/extras/rpe/rpeconstruction.py b/pygsti/extras/rpe/rpeconstruction.py index 6b48af2f8..6559592e2 100644 --- a/pygsti/extras/rpe/rpeconstruction.py +++ b/pygsti/extras/rpe/rpeconstruction.py @@ -113,8 +113,6 @@ def create_parameterized_rpe_model(alpha_true, epsilon_true, aux_rot, spam_depol return outputModel -#def make_rpe_alpha_str_lists(k_list,angleStr,rpeconfig_inst): - def create_rpe_angle_circuit_lists(k_list, angle_name, rpeconfig_inst): """ diff --git a/pygsti/forwardsims/forwardsim.py b/pygsti/forwardsims/forwardsim.py index 2ae19f2f3..3d85d92e1 100644 --- a/pygsti/forwardsims/forwardsim.py +++ b/pygsti/forwardsims/forwardsim.py @@ -147,57 +147,6 @@ def _set_evotype(self, evotype): `evotype` will be `None` when the current model is None""" pass - #def to_vector(self): - # """ - # Returns the parameter vector of the associated Model. - # - # Returns - # ------- - # numpy array - # The vectorized model parameters. - # """ - # return self.paramvec - # - #def from_vector(self, v, close=False, nodirty=False): - # """ - # The inverse of to_vector. - # - # Initializes the Model-like members of this - # calculator based on `v`. Used for computing finite-difference derivatives. - # - # Parameters - # ---------- - # v : numpy.ndarray - # The parameter vector. - # - # close : bool, optional - # Set to `True` if `v` is close to the current parameter vector. - # This can make some operations more efficient. - # - # nodirty : bool, optional - # If True, the framework for marking and detecting when operations - # have changed and a Model's parameter-vector needs to be updated - # is disabled. Disabling this will increases the speed of the call. - # - # Returns - # ------- - # None - # """ - # #Note: this *will* initialize the parent Model's objects too, - # # since only references to preps, effects, and gates are held - # # by the calculator class. ORDER is important, as elements of - # # POVMs and Instruments rely on a fixed from_vector ordering - # # of their simplified effects/gates. 
- # self.paramvec = v.copy() # now self.paramvec is *not* the same as the Model's paramvec - # self.sos.from_vector(v, close, nodirty) # so don't always want ", nodirty=True)" - we - # # need to set dirty flags so *parent* will re-init it's paramvec... - # - # #Re-init reps for computation - # #self.operationreps = { i:self.operations[lbl].torep() for lbl,i in self.operation_lookup.items() } - # #self.operationreps = { lbl:g.torep() for lbl,g in gates.items() } - # #self.prepreps = { lbl:p.torep('prep') for lbl,p in preps.items() } - # #self.effectreps = { lbl:e.torep('effect') for lbl,e in effects.items() } - def _compute_circuit_outcome_probabilities(self, array_to_fill, circuit, outcomes, resource_alloc, time=None): raise NotImplementedError("Derived classes should implement this!") diff --git a/pygsti/forwardsims/mapforwardsim.py b/pygsti/forwardsims/mapforwardsim.py index 8a7140291..501c1f855 100644 --- a/pygsti/forwardsims/mapforwardsim.py +++ b/pygsti/forwardsims/mapforwardsim.py @@ -300,13 +300,6 @@ def create_layout(self, circuits, dataset=None, resource_alloc=None, array_types layout._param_dimensions, (loc_nparams1, loc_nparams2), (blk1, blk2), max_atom_cachesize, self.model.dim) - #def approx_mem_estimate(nc, np1, np2): - # approx_cachesize = (num_circuits / nc) * 1.3 # inflate expected # of circuits per atom => cache_size - # return _bytes_for_array_types(array_types, num_elements, num_elements / nc, - # num_circuits, num_circuits / nc, - # (num_params, num_params), (num_params / np1, num_params / np2), - # approx_cachesize, self.model.dim) - GB = 1.0 / 1024.0**3 if mem_estimate > mem_limit: raise MemoryError("Not enough memory for desired layout! (limit=%.1fGB, required=%.1fGB)" % ( diff --git a/pygsti/forwardsims/matrixforwardsim.py b/pygsti/forwardsims/matrixforwardsim.py index a24c1322d..2952ddef0 100644 --- a/pygsti/forwardsims/matrixforwardsim.py +++ b/pygsti/forwardsims/matrixforwardsim.py @@ -1141,13 +1141,6 @@ def create_layout(self, circuits, dataset=None, resource_alloc=None, array_types (blk1, blk2), max_atom_cachesize, self.model.evotype.minimal_dim(self.model.state_space)) - #def approx_mem_estimate(natoms, np1, np2): - # approx_cachesize = (num_circuits / natoms) * 1.3 # inflate expected # circuits per atom => cache_size - # return _bytes_for_array_types(array_types, num_elements, num_elements / natoms, - # num_circuits, num_circuits / natoms, - # (num_params, num_params), (num_params / np1, num_params / np2), - # approx_cachesize, self.model.state_space.dim) - GB = 1.0 / 1024.0**3 if mem_estimate > mem_limit: raise MemoryError("Not enough memory for desired layout! (limit=%.1fGB, required=%.1fGB)" % ( diff --git a/pygsti/forwardsims/termforwardsim.py b/pygsti/forwardsims/termforwardsim.py index d6b00b4fc..06a890992 100644 --- a/pygsti/forwardsims/termforwardsim.py +++ b/pygsti/forwardsims/termforwardsim.py @@ -243,16 +243,6 @@ def __getstate__(self): # and this is done by the parent model which will cause _set_evotype to be called. return state - #OLD - now we have a _set_evotype method. 
- #@_ForwardSimulator.model.setter - #def model(self, val): - # _ForwardSimulator.model.fset(self, val) # set the base class property (self.model) - # - # #Do some additional initialization - # if self.model.evotype not in ("svterm", "cterm"): - # #raise ValueError(f"Evolution type {self.model.evotype} is incompatible with term-based calculations") - # _warnings.warn("Evolution type %s is incompatible with term-based calculations" % self.model.evotype) - def copy(self): """ Return a shallow copy of this TermForwardSimulator. @@ -416,105 +406,6 @@ def _bulk_fill_hprobs_atom(self, array_to_fill, dest_param_slice1, dest_param_sl hpolys[0], hpolys[1], self.model.to_vector(), (nEls, len(wrtInds1), len(wrtInds2))) _fas(array_to_fill, [slice(0, array_to_fill.shape[0]), dest_param_slice1, dest_param_slice2], hprobs) - #DIRECT FNS - keep these around, but they need to be updated (as do routines in fastreplib.pyx) - #def _prs_directly(self, layout_atom, resource_alloc): #comm=None, mem_limit=None, reset_wts=True, repcache=None): - # """ - # Compute probabilities of `layout`'s circuits using "direct" mode. - # - # Parameters - # ---------- - # layout : CircuitOutcomeProbabilityArrayLayout - # The layout. - # - # comm : mpi4py.MPI.Comm, optional - # When not None, an MPI communicator for distributing the computation - # across multiple processors. Distribution is performed over - # subtrees of eval_tree (if it is split). - # - # mem_limit : int, optional - # A rough memory limit in bytes. - # - # reset_wts : bool, optional - # Whether term magnitudes should be updated based on current term coefficients - # (which are based on the current point in model-parameter space) or not. - # - # repcache : dict, optional - # A cache of term representations for increased performance. - # """ - # prs = _np.empty(layout_atom.num_elements, 'd') - # #print("Computing prs directly for %d circuits" % len(circuit_list)) - # if repcache is None: repcache = {} # new repcache... - # k = 0 # *linear* evaluation order so we know final indices are just running - # for i in eval_tree.evaluation_order(): - # circuit = eval_tree[i] - # #print("Computing prs directly: circuit %d of %d" % (i,len(circuit_list))) - # assert(self.evotype == "svterm") # for now, just do SV case - # fastmode = False # start with slow mode - # wtTol = 0.1 - # rholabel = circuit[0] - # opStr = circuit[1:] - # elabels = eval_tree.simplified_circuit_elabels[i] - # prs[k:k + len(elabels)] = replib.SV_prs_directly(self, rholabel, elabels, opStr, - # repcache, comm, mem_limit, fastmode, wtTol, reset_wts, - # self.times_debug) - # k += len(elabels) - # #print("PRS = ",prs) - # return prs - # - #def _dprs_directly(self, eval_tree, wrt_slice, comm=None, mem_limit=None, reset_wts=True, repcache=None): - # """ - # Compute probability derivatives of `eval_tree`'s circuits using "direct" mode. - # - # Parameters - # ---------- - # eval_tree : TermEvalTree - # The evaluation tree. - # - # wrt_slice : slice - # A slice specifying which model parameters to differentiate with respect to. - # - # comm : mpi4py.MPI.Comm, optional - # When not None, an MPI communicator for distributing the computation - # across multiple processors. Distribution is performed over - # subtrees of eval_tree (if it is split). - # - # mem_limit : int, optional - # A rough memory limit in bytes. - # - # reset_wts : bool, optional - # Whether term magnitudes should be updated based on current term coefficients - # (which are based on the current point in model-parameter space) or not. 
- # - # repcache : dict, optional - # A cache of term representations for increased performance. - # """ - # #Note: Finite difference derivatives are SLOW! - # if wrt_slice is None: - # wrt_indices = list(range(self.Np)) - # elif isinstance(wrt_slice, slice): - # wrt_indices = _slct.indices(wrt_slice) - # else: - # wrt_indices = wrt_slice - # - # eps = 1e-6 # HARDCODED - # probs = self._prs_directly(eval_tree, comm, mem_limit, reset_wts, repcache) - # dprobs = _np.empty((eval_tree.num_final_elements(), len(wrt_indices)), 'd') - # orig_vec = self.to_vector().copy() - # iParamToFinal = {i: ii for ii, i in enumerate(wrt_indices)} - # for i in range(self.Np): - # #print("direct dprobs cache %d of %d" % (i,self.Np)) - # if i in iParamToFinal: # LATER: add MPI support? - # iFinal = iParamToFinal[i] - # vec = orig_vec.copy(); vec[i] += eps - # self.from_vector(vec, close=True) - # dprobs[:, iFinal] = (self._prs_directly(eval_tree, - # comm=None, - # mem_limit=None, - # reset_wts=False, - # repcache=repcache) - probs) / eps - # self.from_vector(orig_vec, close=True) - # return dprobs - ## ----- Find a "minimal" path set (i.e. find thresholds for each circuit ----- def _compute_pruned_pathmag_threshold(self, rholabel, elabels, circuit, polynomial_vindices_per_int, repcache, circuitsetup_cache, diff --git a/pygsti/forwardsims/termforwardsim_calc_statevec.pyx b/pygsti/forwardsims/termforwardsim_calc_statevec.pyx index d6a2d5ac0..e784aaa24 100644 --- a/pygsti/forwardsims/termforwardsim_calc_statevec.pyx +++ b/pygsti/forwardsims/termforwardsim_calc_statevec.pyx @@ -1531,654 +1531,3 @@ def circuit_achieved_and_max_sopm(fwdsim, rholabel, elabels, circuit, repcache, max_sopm[i] = max_sum_of_pathmags[i] return achieved_sopm, max_sopm - - - - -# State-vector direct-term calcs ------------------------- - -#cdef vector[vector[TermDirectCRep_ptr]] extract_cterms_direct(python_termrep_lists, INT max_order): -# cdef vector[vector[TermDirectCRep_ptr]] ret = vector[vector[TermDirectCRep_ptr]](max_order+1) -# cdef vector[TermDirectCRep*] vec_of_terms -# for order,termreps in enumerate(python_termrep_lists): # maxorder+1 lists -# vec_of_terms = vector[TermDirectCRep_ptr](len(termreps)) -# for i,termrep in enumerate(termreps): -# vec_of_terms[i] = (termrep).c_term -# ret[order] = vec_of_terms -# return ret - -#def prs_directly(calc, rholabel, elabels, circuit, repcache, comm=None, mem_limit=None, fastmode=True, wt_tol=0.0, reset_term_weights=True, debug=None): -# -# # Create gatelable -> int mapping to be used throughout -# distinct_gateLabels = sorted(set(circuit)) -# glmap = { gl: i for i,gl in enumerate(distinct_gateLabels) } -# t0 = pytime.time() -# -# # Convert circuit to a vector of ints -# cdef INT i, j -# cdef vector[INT] cgatestring -# for gl in circuit: -# cgatestring.push_back(glmap[gl]) -# -# #TODO: maybe compute these weights elsewhere and pass in? 
-# cdef double circuitWeight -# cdef double remaingingWeightTol = wt_tol -# cdef vector[double] remainingWeight = vector[double](len(elabels)) -# if 'circuitWeights' not in repcache: -# repcache['circuitWeights'] = {} -# if reset_term_weights or circuit not in repcache['circuitWeights']: -# circuitWeight = calc.sos.get_prep(rholabel).total_term_weight() -# for gl in circuit: -# circuitWeight *= calc.sos.get_operation(gl).total_term_weight() -# for i,elbl in enumerate(elabels): -# remainingWeight[i] = circuitWeight * calc.sos.get_effect(elbl).total_term_weight() -# repcache['circuitWeights'][circuit] = [ remainingWeight[i] for i in range(remainingWeight.size()) ] -# else: -# for i,wt in enumerate(repcache['circuitWeights'][circuit]): -# assert(wt > 1.0) -# remainingWeight[i] = wt -# -# #if reset_term_weights: -# # print "Remaining weights: " -# # for i in range(remainingWeight.size()): -# # print remainingWeight[i] -# -# cdef double order_base = 0.1 # default for now - TODO: make this a calc param like max_order? -# cdef INT order -# cdef INT numEs = len(elabels) -# -# cdef RepCacheEl repcel; -# cdef vector[TermDirectCRep_ptr] treps; -# cdef DCOMPLEX* coeffs; -# cdef vector[TermDirectCRep*] reps_at_order; -# cdef np.ndarray coeffs_array; -# cdef TermDirectRep rep; -# -# # Construct dict of gate term reps, then *convert* to c-reps, as this -# # keeps alive the non-c-reps which keep the c-reps from being deallocated... -# cdef unordered_map[INT, vector[vector[TermDirectCRep_ptr]] ] op_term_reps = unordered_map[INT, vector[vector[TermDirectCRep_ptr]] ](); # OLD = {} -# for glbl in distinct_gateLabels: -# if glbl in repcache: -# repcel = repcache[glbl] -# op_term_reps[ glmap[glbl] ] = repcel.reps -# for order in range(calc.max_order+1): -# treps = repcel.reps[order] -# coeffs_array = calc.sos.operation(glbl).get_direct_order_coeffs(order,order_base) -# coeffs = (coeffs_array.data) -# for i in range(treps.size()): -# treps[i]._coeff = coeffs[i] -# if reset_term_weights: treps[i]._magnitude = abs(coeffs[i]) -# #for order,treps in enumerate(op_term_reps[ glmap[glbl] ]): -# # for coeff,trep in zip(calc.sos.operation(glbl).get_direct_order_coeffs(order,order_base), treps): -# # trep.set_coeff(coeff) -# else: -# repcel = RepCacheEl(calc.max_order) -# for order in range(calc.max_order+1): -# reps_at_order = vector[TermDirectCRep_ptr](0) -# for t in calc.sos.operation(glbl).get_direct_order_terms(order,order_base): -# rep = (t.torep(None,None,"gate")) -# repcel.pyterm_references.append(rep) -# reps_at_order.push_back( rep.c_term ) -# repcel.reps[order] = reps_at_order -# #OLD -# #reps = [ [t.torep(None,None,"gate") for t in calc.sos.operation(glbl).get_direct_order_terms(order,order_base)] -# # for order in range(calc.max_order+1) ] -# op_term_reps[ glmap[glbl] ] = repcel.reps -# repcache[glbl] = repcel -# -# #OLD -# #op_term_reps = { glmap[glbl]: [ [t.torep(None,None,"gate") for t in calc.sos.operation(glbl).get_direct_order_terms(order,order_base)] -# # for order in range(calc.max_order+1) ] -# # for glbl in distinct_gateLabels } -# -# #Similar with rho_terms and E_terms -# cdef vector[vector[TermDirectCRep_ptr]] rho_term_reps; -# if rholabel in repcache: -# repcel = repcache[rholabel] -# rho_term_reps = repcel.reps -# for order in range(calc.max_order+1): -# treps = rho_term_reps[order] -# coeffs_array = calc.sos.prep(rholabel).get_direct_order_coeffs(order,order_base) -# coeffs = (coeffs_array.data) -# for i in range(treps.size()): -# treps[i]._coeff = coeffs[i] -# if reset_term_weights: 
treps[i]._magnitude = abs(coeffs[i]) -# -# #for order,treps in enumerate(rho_term_reps): -# # for coeff,trep in zip(calc.sos.prep(rholabel).get_direct_order_coeffs(order,order_base), treps): -# # trep.set_coeff(coeff) -# else: -# repcel = RepCacheEl(calc.max_order) -# for order in range(calc.max_order+1): -# reps_at_order = vector[TermDirectCRep_ptr](0) -# for t in calc.sos.prep(rholabel).get_direct_order_terms(order,order_base): -# rep = (t.torep(None,None,"prep")) -# repcel.pyterm_references.append(rep) -# reps_at_order.push_back( rep.c_term ) -# repcel.reps[order] = reps_at_order -# rho_term_reps = repcel.reps -# repcache[rholabel] = repcel -# -# #OLD -# #rho_term_reps = [ [t.torep(None,None,"prep") for t in calc.sos.prep(rholabel).get_direct_order_terms(order,order_base)] -# # for order in range(calc.max_order+1) ] -# #repcache[rholabel] = rho_term_reps -# -# #E_term_reps = [] -# cdef vector[vector[TermDirectCRep_ptr]] E_term_reps = vector[vector[TermDirectCRep_ptr]](0); -# cdef TermDirectCRep_ptr cterm; -# e_indices = [] # TODO: upgrade to C-type? -# if all([ elbl in repcache for elbl in elabels]): -# for order in range(calc.max_order+1): -# reps_at_order = vector[TermDirectCRep_ptr](0) # the term reps for *all* the effect vectors -# cur_indices = [] # the Evec-index corresponding to each term rep -# for j,elbl in enumerate(elabels): -# repcel = repcache[elbl] -# #term_reps = [t.torep(None,None,"effect") for t in calc.sos.effect(elbl).get_direct_order_terms(order,order_base) ] -# -# treps = repcel.reps[order] -# coeffs_array = calc.sos.effect(elbl).get_direct_order_coeffs(order,order_base) -# coeffs = (coeffs_array.data) -# for i in range(treps.size()): -# treps[i]._coeff = coeffs[i] -# if reset_term_weights: treps[i]._magnitude = abs(coeffs[i]) -# reps_at_order.push_back(treps[i]) -# cur_indices.extend( [j]*reps_at_order.size() ) -# -# #OLD -# #term_reps = repcache[elbl][order] -# #for coeff,trep in zip(calc.sos.effect(elbl).get_direct_order_coeffs(order,order_base), term_reps): -# # trep.set_coeff(coeff) -# #cur_term_reps.extend( term_reps ) -# # cur_indices.extend( [j]*len(term_reps) ) -# -# E_term_reps.push_back(reps_at_order) -# e_indices.append( cur_indices ) -# # E_term_reps.append( cur_term_reps ) -# -# else: -# for elbl in elabels: -# if elbl not in repcache: repcache[elbl] = RepCacheEl(calc.max_order) #[None]*(calc.max_order+1) # make sure there's room -# for order in range(calc.max_order+1): -# reps_at_order = vector[TermDirectCRep_ptr](0) # the term reps for *all* the effect vectors -# cur_indices = [] # the Evec-index corresponding to each term rep -# for j,elbl in enumerate(elabels): -# repcel = repcache[elbl] -# treps = vector[TermDirectCRep_ptr](0) # the term reps for *all* the effect vectors -# for t in calc.sos.effect(elbl).get_direct_order_terms(order,order_base): -# rep = (t.torep(None,None,"effect")) -# repcel.pyterm_references.append(rep) -# treps.push_back( rep.c_term ) -# reps_at_order.push_back( rep.c_term ) -# repcel.reps[order] = treps -# cur_indices.extend( [j]*treps.size() ) -# #term_reps = [t.torep(None,None,"effect") for t in calc.sos.effect(elbl).get_direct_order_terms(order,order_base) ] -# #repcache[elbl][order] = term_reps -# #cur_term_reps.extend( term_reps ) -# #cur_indices.extend( [j]*len(term_reps) ) -# E_term_reps.push_back(reps_at_order) -# e_indices.append( cur_indices ) -# #E_term_reps.append( cur_term_reps ) -# -# #convert to c-reps -# cdef INT gi -# #cdef vector[vector[TermDirectCRep_ptr]] rho_term_creps = rho_term_reps # already c-reps... 
-# #cdef vector[vector[TermDirectCRep_ptr]] E_term_creps = E_term_reps # already c-reps... -# #cdef unordered_map[INT, vector[vector[TermDirectCRep_ptr]]] gate_term_creps = op_term_reps # already c-reps... -# #cdef vector[vector[TermDirectCRep_ptr]] rho_term_creps = extract_cterms_direct(rho_term_reps,calc.max_order) -# #cdef vector[vector[TermDirectCRep_ptr]] E_term_creps = extract_cterms_direct(E_term_reps,calc.max_order) -# #for gi,termrep_lists in op_term_reps.items(): -# # gate_term_creps[gi] = extract_cterms_direct(termrep_lists,calc.max_order) -# -# E_cindices = vector[vector[INT]](len(e_indices)) -# for ii,inds in enumerate(e_indices): -# E_cindices[ii] = vector[INT](len(inds)) -# for jj,indx in enumerate(inds): -# E_cindices[ii][jj] = indx -# -# #Note: term calculator "dim" is the full density matrix dim -# stateDim = int(round(np.sqrt(calc.dim))) -# if debug is not None: -# debug['tstartup'] += pytime.time()-t0 -# t0 = pytime.time() -# -# #Call C-only function (which operates with C-representations only) -# cdef vector[float] debugvec = vector[float](10) -# debugvec[0] = 0.0 -# cdef vector[DCOMPLEX] prs = prs_directly( -# cgatestring, rho_term_reps, op_term_reps, E_term_reps, -# #cgatestring, rho_term_creps, gate_term_creps, E_term_creps, -# E_cindices, numEs, calc.max_order, stateDim, fastmode, &remainingWeight, remaingingWeightTol, debugvec) -# -# debug['total'] += debugvec[0] -# debug['t1'] += debugvec[1] -# debug['t2'] += debugvec[2] -# debug['t3'] += debugvec[3] -# debug['n1'] += debugvec[4] -# debug['n2'] += debugvec[5] -# debug['n3'] += debugvec[6] -# debug['t4'] += debugvec[7] -# debug['n4'] += debugvec[8] -# #if not all([ abs(prs[i].imag) < 1e-4 for i in range(prs.size()) ]): -# # print("ERROR: prs = ",[ prs[i] for i in range(prs.size()) ]) -# #assert(all([ abs(prs[i].imag) < 1e-6 for i in range(prs.size()) ])) -# return [ prs[i].real for i in range(prs.size()) ] # TODO: make this into a numpy array? - maybe pass array to fill to prs_directy above? 
-# -# -#cdef vector[DCOMPLEX] prs_directly( -# vector[INT]& circuit, vector[vector[TermDirectCRep_ptr]] rho_term_reps, -# unordered_map[INT, vector[vector[TermDirectCRep_ptr]]] op_term_reps, -# vector[vector[TermDirectCRep_ptr]] E_term_reps, vector[vector[INT]] E_term_indices, -# INT numEs, INT max_order, INT dim, bool fastmode, vector[double]* remainingWeight, double remTol, vector[float]& debugvec): -# -# #NOTE: circuit and gate_terms use *integers* as operation labels, not Label objects, to speed -# # lookups and avoid weird string conversion stuff with Cython -# -# cdef INT N = len(circuit) -# cdef INT* p = malloc((N+2) * sizeof(INT)) -# cdef INT i,j,k,order,nTerms -# cdef INT gn -# -# cdef INT t0 = time.clock() -# cdef INT t, n, nPaths; #for below -# -# cdef innerloopfn_direct_ptr innerloop_fn; -# if fastmode: -# innerloop_fn = pr_directly_innerloop_savepartials -# else: -# innerloop_fn = pr_directly_innerloop -# -# #extract raw data from gate_terms dictionary-of-lists for faster lookup -# #gate_term_prefactors = np.empty( (nOperations,max_order+1,dim,dim) -# #cdef unordered_map[INT, vector[vector[unordered_map[INT, complex]]]] gate_term_coeffs -# #cdef vector[vector[unordered_map[INT, complex]]] rho_term_coeffs -# #cdef vector[vector[unordered_map[INT, complex]]] E_term_coeffs -# #cdef vector[vector[INT]] e_indices -# -# cdef vector[INT]* Einds -# cdef vector[vector_TermDirectCRep_ptr_ptr] factor_lists -# -# assert(max_order <= 2) # only support this partitioning below (so far) -# -# cdef vector[DCOMPLEX] prs = vector[DCOMPLEX](numEs) -# -# for order in range(max_order+1): -# #print("DB: pr_as_polynomial order=",order) -# -# #for p in partition_into(order, N): -# for i in range(N+2): p[i] = 0 # clear p -# factor_lists = vector[vector_TermDirectCRep_ptr_ptr](N+2) -# -# if order == 0: -# #inner loop(p) -# #factor_lists = [ gate_terms[glbl][pi] for glbl,pi in zip(circuit,p) ] -# t = time.clock() -# factor_lists[0] = &rho_term_reps[p[0]] -# for k in range(N): -# gn = circuit[k] -# factor_lists[k+1] = &op_term_reps[circuit[k]][p[k+1]] -# #if factor_lists[k+1].size() == 0: continue # WHAT??? -# factor_lists[N+1] = &E_term_reps[p[N+1]] -# Einds = &E_term_indices[p[N+1]] -# -# #print("Part0 ",p) -# nPaths = innerloop_fn(factor_lists,Einds,&prs,dim,remainingWeight,0.0) #remTol) # force 0-order -# debugvec[1] += float(time.clock() - t)/time.CLOCKS_PER_SEC -# debugvec[4] += nPaths -# -# elif order == 1: -# t = time.clock(); n=0 -# for i in range(N+2): -# p[i] = 1 -# #inner loop(p) -# factor_lists[0] = &rho_term_reps[p[0]] -# for k in range(N): -# gn = circuit[k] -# factor_lists[k+1] = &op_term_reps[gn][p[k+1]] -# #if len(factor_lists[k+1]) == 0: continue #WHAT??? -# factor_lists[N+1] = &E_term_reps[p[N+1]] -# Einds = &E_term_indices[p[N+1]] -# -# #print "DB: Order1 " -# nPaths = innerloop_fn(factor_lists,Einds,&prs,dim,remainingWeight,0.0) #remTol) # force 1st-order -# p[i] = 0 -# n += nPaths -# debugvec[2] += float(time.clock() - t)/time.CLOCKS_PER_SEC -# debugvec[5] += n -# -# elif order == 2: -# t = time.clock(); n=0 -# for i in range(N+2): -# p[i] = 2 -# #inner loop(p) -# factor_lists[0] = &rho_term_reps[p[0]] -# for k in range(N): -# gn = circuit[k] -# factor_lists[k+1] = &op_term_reps[circuit[k]][p[k+1]] -# #if len(factor_lists[k+1]) == 0: continue # WHAT??? 
-# factor_lists[N+1] = &E_term_reps[p[N+1]] -# Einds = &E_term_indices[p[N+1]] -# -# nPaths = innerloop_fn(factor_lists,Einds,&prs,dim,remainingWeight,remTol) -# p[i] = 0 -# n += nPaths -# -# debugvec[3] += float(time.clock() - t)/time.CLOCKS_PER_SEC -# debugvec[6] += n -# t = time.clock(); n=0 -# -# for i in range(N+2): -# p[i] = 1 -# for j in range(i+1,N+2): -# p[j] = 1 -# #inner loop(p) -# factor_lists[0] = &rho_term_reps[p[0]] -# for k in range(N): -# gn = circuit[k] -# factor_lists[k+1] = &op_term_reps[circuit[k]][p[k+1]] -# #if len(factor_lists[k+1]) == 0: continue #WHAT??? -# factor_lists[N+1] = &E_term_reps[p[N+1]] -# Einds = &E_term_indices[p[N+1]] -# -# nPaths = innerloop_fn(factor_lists,Einds,&prs,dim,remainingWeight,remTol) -# p[j] = 0 -# n += nPaths -# p[i] = 0 -# debugvec[7] += float(time.clock() - t)/time.CLOCKS_PER_SEC -# debugvec[8] += n -# -# else: -# assert(False) # order > 2 not implemented yet... -# -# free(p) -# -# debugvec[0] += float(time.clock() - t0)/time.CLOCKS_PER_SEC -# return prs -# -# -# -#cdef INT pr_directly_innerloop(vector[vector_TermDirectCRep_ptr_ptr] factor_lists, vector[INT]* Einds, -# vector[DCOMPLEX]* prs, INT dim, vector[double]* remainingWeight, double remainingWeightTol): -# #print("DB partition = ","listlens = ",[len(fl) for fl in factor_lists]) -# -# cdef INT i,j,Ei -# cdef double complex scale, val, newval, pLeft, pRight, p -# cdef double wt, cwt -# cdef int nPaths = 0 -# -# cdef TermDirectCRep* factor -# -# cdef INT nFactorLists = factor_lists.size() # may need to recompute this after fast-mode -# cdef INT* factorListLens = malloc(nFactorLists * sizeof(INT)) -# cdef INT last_index = nFactorLists-1 -# -# for i in range(nFactorLists): -# factorListLens[i] = factor_lists[i].size() -# if factorListLens[i] == 0: -# free(factorListLens) -# return 0 # nothing to loop over! - (exit before we allocate more) -# -# cdef double complex coeff # THESE are only real changes from "as_polynomial" -# cdef double complex result # version of this function (where they are PolynomialCRep type) -# -# cdef StateCRep *prop1 = new StateCRep(dim) -# cdef StateCRep *prop2 = new StateCRep(dim) -# cdef StateCRep *tprop -# cdef EffectCRep* EVec -# -# cdef INT* b = malloc(nFactorLists * sizeof(INT)) -# for i in range(nFactorLists): b[i] = 0 -# -# assert(nFactorLists > 0), "Number of factor lists must be > 0!" 
-# -# #for factors in _itertools.product(*factor_lists): -# while(True): -# final_factor_indx = b[last_index] -# Ei = deref(Einds)[final_factor_indx] #final "factor" index == E-vector index -# wt = deref(remainingWeight)[Ei] -# if remainingWeightTol == 0.0 or wt > remainingWeightTol: #if we need this "path" -# # In this loop, b holds "current" indices into factor_lists -# factor = deref(factor_lists[0])[b[0]] # the last factor (an Evec) -# coeff = factor._coeff -# cwt = factor._magnitude -# -# for i in range(1,nFactorLists): -# coeff *= deref(factor_lists[i])[b[i]]._coeff -# cwt *= deref(factor_lists[i])[b[i]]._magnitude -# -# #pLeft / "pre" sim -# factor = deref(factor_lists[0])[b[0]] # 0th-factor = rhoVec -# prop1.copy_from(factor._pre_state) -# for j in range(factor._pre_ops.size()): -# factor._pre_ops[j].acton(prop1,prop2) -# tprop = prop1; prop1 = prop2; prop2 = tprop -# for i in range(1,last_index): -# factor = deref(factor_lists[i])[b[i]] -# for j in range(factor._pre_ops.size()): -# factor._pre_ops[j].acton(prop1,prop2) -# tprop = prop1; prop1 = prop2; prop2 = tprop # final state in prop1 -# factor = deref(factor_lists[last_index])[b[last_index]] # the last factor (an Evec) -# -# # can't propagate effects, so effect's post_ops are constructed to act on *state* -# EVec = factor._post_effect -# for j in range(factor._post_ops.size()): -# rhoVec = factor._post_ops[j].acton(prop1,prop2) -# tprop = prop1; prop1 = prop2; prop2 = tprop # final state in prop1 -# pLeft = EVec.amplitude(prop1) -# -# #pRight / "post" sim -# factor = deref(factor_lists[0])[b[0]] # 0th-factor = rhoVec -# prop1.copy_from(factor._post_state) -# for j in range(factor._post_ops.size()): -# factor._post_ops[j].acton(prop1,prop2) -# tprop = prop1; prop1 = prop2; prop2 = tprop # final state in prop1 -# for i in range(1,last_index): -# factor = deref(factor_lists[i])[b[i]] -# for j in range(factor._post_ops.size()): -# factor._post_ops[j].acton(prop1,prop2) -# tprop = prop1; prop1 = prop2; prop2 = tprop # final state in prop1 -# factor = deref(factor_lists[last_index])[b[last_index]] # the last factor (an Evec) -# -# EVec = factor._pre_effect -# for j in range(factor._pre_ops.size()): -# factor._pre_ops[j].acton(prop1,prop2) -# tprop = prop1; prop1 = prop2; prop2 = tprop # final state in prop1 -# pRight = EVec.amplitude(prop1).conjugate() -# -# #Add result to appropriate polynomial -# result = coeff * pLeft * pRight -# deref(prs)[Ei] = deref(prs)[Ei] + result #TODO - see why += doesn't work here -# deref(remainingWeight)[Ei] = wt - cwt # "weight" of this path -# nPaths += 1 # just for debuggins -# -# #increment b ~ itertools.product & update vec_index_noop = np.dot(self.multipliers, b) -# for i in range(nFactorLists-1,-1,-1): -# if b[i]+1 < factorListLens[i]: -# b[i] += 1 -# break -# else: -# b[i] = 0 -# else: -# break # can't increment anything - break while(True) loop -# -# #Clenaup: free allocated memory -# del prop1 -# del prop2 -# free(factorListLens) -# free(b) -# return nPaths -# -# -#cdef INT pr_directly_innerloop_savepartials(vector[vector_TermDirectCRep_ptr_ptr] factor_lists, -# vector[INT]* Einds, vector[DCOMPLEX]* prs, INT dim, -# vector[double]* remainingWeight, double remainingWeightTol): -# #print("DB partition = ","listlens = ",[len(fl) for fl in factor_lists]) -# -# cdef INT i,j,Ei -# cdef double complex scale, val, newval, pLeft, pRight, p -# -# cdef INT incd -# cdef TermDirectCRep* factor -# -# cdef INT nFactorLists = factor_lists.size() # may need to recompute this after fast-mode -# cdef INT* 
factorListLens = malloc(nFactorLists * sizeof(INT)) -# cdef INT last_index = nFactorLists-1 -# -# for i in range(nFactorLists): -# factorListLens[i] = factor_lists[i].size() -# if factorListLens[i] == 0: -# free(factorListLens) -# return 0 # nothing to loop over! (exit before we allocate anything else) -# -# cdef double complex coeff -# cdef double complex result -# -# #fast mode -# cdef vector[StateCRep*] leftSaved = vector[StateCRep_ptr](nFactorLists-1) # saved[i] is state after i-th -# cdef vector[StateCRep*] rightSaved = vector[StateCRep_ptr](nFactorLists-1) # factor has been applied -# cdef vector[DCOMPLEX] coeffSaved = vector[DCOMPLEX](nFactorLists-1) -# cdef StateCRep *shelved = new StateCRep(dim) -# cdef StateCRep *prop2 = new StateCRep(dim) # prop2 is always a temporary allocated state not owned by anything else -# cdef StateCRep *prop1 -# cdef StateCRep *tprop -# cdef EffectCRep* EVec -# -# cdef INT* b = malloc(nFactorLists * sizeof(INT)) -# for i in range(nFactorLists): b[i] = 0 -# assert(nFactorLists > 0), "Number of factor lists must be > 0!" -# -# incd = 0 -# -# #Fill saved arrays with allocated states -# for i in range(nFactorLists-1): -# leftSaved[i] = new StateCRep(dim) -# rightSaved[i] = new StateCRep(dim) -# -# #for factors in _itertools.product(*factor_lists): -# #for incd,fi in incd_product(*[range(len(l)) for l in factor_lists]): -# while(True): -# # In this loop, b holds "current" indices into factor_lists -# #print "DB: iter-product BEGIN" -# -# if incd == 0: # need to re-evaluate rho vector -# #print "DB: re-eval at incd=0" -# factor = deref(factor_lists[0])[b[0]] -# -# #print "DB: re-eval left" -# prop1 = leftSaved[0] # the final destination (prop2 is already alloc'd) -# prop1.copy_from(factor._pre_state) -# for j in range(factor._pre_ops.size()): -# #print "DB: re-eval left item" -# factor._pre_ops[j].acton(prop1,prop2) -# tprop = prop1; prop1 = prop2; prop2 = tprop # swap prop1 <-> prop2 -# rhoVecL = prop1 -# leftSaved[0] = prop1 # final state -> saved -# # (prop2 == the other allocated state) -# -# #print "DB: re-eval right" -# prop1 = rightSaved[0] # the final destination (prop2 is already alloc'd) -# prop1.copy_from(factor._post_state) -# for j in range(factor._post_ops.size()): -# #print "DB: re-eval right item" -# factor._post_ops[j].acton(prop1,prop2) -# tprop = prop1; prop1 = prop2; prop2 = tprop # swap prop1 <-> prop2 -# rhoVecR = prop1 -# rightSaved[0] = prop1 # final state -> saved -# # (prop2 == the other allocated state) -# -# #print "DB: re-eval coeff" -# coeff = factor._coeff -# coeffSaved[0] = coeff -# incd += 1 -# else: -# #print "DB: init from incd" -# rhoVecL = leftSaved[incd-1] -# rhoVecR = rightSaved[incd-1] -# coeff = coeffSaved[incd-1] -# -# # propagate left and right states, saving as we go -# for i in range(incd,last_index): -# #print "DB: propagate left begin" -# factor = deref(factor_lists[i])[b[i]] -# prop1 = leftSaved[i] # destination -# prop1.copy_from(rhoVecL) #starting state -# for j in range(factor._pre_ops.size()): -# #print "DB: propagate left item" -# factor._pre_ops[j].acton(prop1,prop2) -# tprop = prop1; prop1 = prop2; prop2 = tprop -# rhoVecL = prop1 -# leftSaved[i] = prop1 -# # (prop2 == the other allocated state) -# -# #print "DB: propagate right begin" -# prop1 = rightSaved[i] # destination -# prop1.copy_from(rhoVecR) #starting state -# for j in range(factor._post_ops.size()): -# #print "DB: propagate right item" -# factor._post_ops[j].acton(prop1,prop2) -# tprop = prop1; prop1 = prop2; prop2 = tprop -# rhoVecR = prop1 -# 
rightSaved[i] = prop1 -# # (prop2 == the other allocated state) -# -# #print "DB: propagate coeff mult" -# coeff *= factor._coeff -# coeffSaved[i] = coeff -# -# # for the last index, no need to save, and need to construct -# # and apply effect vector -# prop1 = shelved # so now prop1 (and prop2) are alloc'd states -# -# #print "DB: left ampl" -# factor = deref(factor_lists[last_index])[b[last_index]] # the last factor (an Evec) -# EVec = factor._post_effect -# prop1.copy_from(rhoVecL) # initial state (prop2 already alloc'd) -# for j in range(factor._post_ops.size()): -# factor._post_ops[j].acton(prop1,prop2) -# tprop = prop1; prop1 = prop2; prop2 = tprop -# pLeft = EVec.amplitude(prop1) # output in prop1, so this is final amplitude -# -# #print "DB: right ampl" -# EVec = factor._pre_effect -# prop1.copy_from(rhoVecR) -# for j in range(factor._pre_ops.size()): -# factor._pre_ops[j].acton(prop1,prop2) -# tprop = prop1; prop1 = prop2; prop2 = tprop -# pRight = EVec.amplitude(prop1).conjugate() -# -# shelved = prop1 # return prop1 to the "shelf" since we'll use prop1 for other things next -# -# #print "DB: final block" -# #print "DB running coeff = ",dict(coeff._coeffs) -# #print "DB factor coeff = ",dict(factor._coeff._coeffs) -# result = coeff * factor._coeff -# #print "DB result = ",dict(result._coeffs) -# result *= pLeft * pRight -# final_factor_indx = b[last_index] -# Ei = deref(Einds)[final_factor_indx] #final "factor" index == E-vector index -# deref(prs)[Ei] += result -# #print "DB prs[",INT(Ei),"] = ",dict(deref(prs)[Ei]._coeffs) -# -# #assert(debug < 100) #DEBUG -# #print "DB: end product loop" -# -# #increment b ~ itertools.product & update vec_index_noop = np.dot(self.multipliers, b) -# for i in range(nFactorLists-1,-1,-1): -# if b[i]+1 < factorListLens[i]: -# b[i] += 1; incd = i -# break -# else: -# b[i] = 0 -# else: -# break # can't increment anything - break while(True) loop -# -# #Cleanup: free allocated memory -# for i in range(nFactorLists-1): -# del leftSaved[i] -# del rightSaved[i] -# del prop2 -# del shelved -# free(factorListLens) -# free(b) -# return 0 #TODO: fix nPaths - diff --git a/pygsti/io/mongodb.py b/pygsti/io/mongodb.py index 1a3004157..f9a969375 100644 --- a/pygsti/io/mongodb.py +++ b/pygsti/io/mongodb.py @@ -158,16 +158,10 @@ def read_auxtree_from_mongodb_doc(mongodb, doc, auxfile_types_member='auxfile_ty def _load_auxdoc_member(mongodb, member_name, typ, metadata, quick_load): - from pymongo import ASCENDING, DESCENDING subtypes = typ.split(':') cur_typ = subtypes[0] next_typ = ':'.join(subtypes[1:]) - # In FUTURE maybe we can implement "quick loading" from a MongoDB, but currently `quick_load` does nothing - #max_size = quick_load if isinstance(quick_load, int) else QUICK_LOAD_MAX_SIZE - #def should_skip_loading(path): - # return quick_load and (path.stat().st_size >= max_size) - if cur_typ == 'list': if metadata is None: # signals that value is None, otherwise would at least be an empty list val = None @@ -809,7 +803,6 @@ def remove_auxtree_from_mongodb(mongodb, collection_name, doc_id, auxfile_types_ def _remove_auxdoc_member(mongodb, member_name, typ, metadata, session, recursive): - from pymongo import ASCENDING, DESCENDING subtypes = typ.split(':') cur_typ = subtypes[0] next_typ = ':'.join(subtypes[1:]) diff --git a/pygsti/layouts/copalayout.py b/pygsti/layouts/copalayout.py index bd5020aa8..34e907d8f 100644 --- a/pygsti/layouts/copalayout.py +++ b/pygsti/layouts/copalayout.py @@ -600,10 +600,6 @@ def fill_jtj(self, j, jtj): """ jtj[:] = _np.dot(j.T, j) 
- #Not needed - #def allocate_jtj_shared_mem_buf(self): - # return _np.empty((self._param_dimensions[0], self._param_dimensions[0]), 'd'), None - def memory_estimate(self, array_type, dtype='d'): """ Memory required to allocate an array of a given type (in bytes). diff --git a/pygsti/layouts/distlayout.py b/pygsti/layouts/distlayout.py index 9db1150d8..16ae93957 100644 --- a/pygsti/layouts/distlayout.py +++ b/pygsti/layouts/distlayout.py @@ -807,7 +807,6 @@ def __init__(self, circuits, unique_circuits, to_unique, unique_complete_circuit super().__init__(local_circuits, local_unique_circuits, local_to_unique, local_elindex_outcome_tuples, local_unique_complete_circuits, param_dimensions, resource_alloc) - @property def max_atom_elements(self): """ The most elements owned by a single atom. """ diff --git a/pygsti/modelmembers/modelmember.py b/pygsti/modelmembers/modelmember.py index 27e36e692..43dcdc94e 100644 --- a/pygsti/modelmembers/modelmember.py +++ b/pygsti/modelmembers/modelmember.py @@ -340,23 +340,6 @@ def unlink_parent(self, force=False): if (self.parent is not None) and (force or self.parent._obj_refcount(self) == 0): self._parent = None - # UNUSED - as this doesn't mark parameter for reallocation like it used to - #def clear_gpindices(self): - # """ - # Sets gpindices to None, along with any submembers' gpindices. - # - # This essentially marks these members for parameter re-allocation - # (e.g. if the number - not just the value - of parameters they have - # changes). - # - # Returns - # ------- - # None - # """ - # for subm in self.submembers(): - # subm.clear_gpindices() - # self._gpindices = None - def set_gpindices(self, gpindices, parent, memo=None): """ Set the parent and indices into the parent's parameter vector that are used by this ModelMember object. 
diff --git a/pygsti/modelmembers/operations/composedop.py b/pygsti/modelmembers/operations/composedop.py index 2a7abb24a..0cc99c929 100644 --- a/pygsti/modelmembers/operations/composedop.py +++ b/pygsti/modelmembers/operations/composedop.py @@ -491,10 +491,6 @@ def _compute_taylor_order_terms(self, order, max_polynomial_vars, gpindices_arra self.terms[order] = terms - #def _decompose_indices(x): - # return tuple(_modelmember._decompose_gpindices( - # self.gpindices, _np.array(x, _np.int64))) - mapvec = _np.ascontiguousarray(_np.zeros(max_polynomial_vars, _np.int64)) for ii, i in enumerate(gpindices_array): mapvec[i] = ii @@ -555,25 +551,6 @@ def taylor_order_terms_above_mag(self, order, max_polynomial_vars, min_term_mag) if mag >= min_term_mag: terms.append(_term.compose_terms_with_mag(factors, mag)) return terms - #def _decompose_indices(x): - # return tuple(_modelmember._decompose_gpindices( - # self.gpindices, _np.array(x, _np.int64))) - # - #mapvec = _np.ascontiguousarray(_np.zeros(max_polynomial_vars,_np.int64)) - #for ii,i in enumerate(self.gpindices_as_array()): - # mapvec[i] = ii - # - ##poly_coeffs = [t.coeff.map_indices(_decompose_indices) for t in terms] # with *local* indices - #poly_coeffs = [t.coeff.mapvec_indices(mapvec) for t in terms] # with *local* indices - #tapes = [poly.compact(complex_coeff_tape=True) for poly in poly_coeffs] - #if len(tapes) > 0: - # vtape = _np.concatenate([t[0] for t in tapes]) - # ctape = _np.concatenate([t[1] for t in tapes]) - #else: - # vtape = _np.empty(0, _np.int64) - # ctape = _np.empty(0, complex) - #coeffs_as_compact_polys = (vtape, ctape) - #self.local_term_poly_coeffs[order] = coeffs_as_compact_polys @property def total_term_magnitude(self): diff --git a/pygsti/modelmembers/operations/embeddedop.py b/pygsti/modelmembers/operations/embeddedop.py index be8ee8d8e..6c97cc217 100644 --- a/pygsti/modelmembers/operations/embeddedop.py +++ b/pygsti/modelmembers/operations/embeddedop.py @@ -276,17 +276,6 @@ def to_dense(self, on_space='minimal'): numpy.ndarray """ - #FUTURE: maybe here or in a new "tosymplectic" method, could - # create an embeded clifford symplectic rep as follows (when - # evotype == "stabilizer"): - #def tosymplectic(self): - # #Embed operation's symplectic rep in larger "full" symplectic rep - # #Note: (qubit) labels are in first (and only) tensor-product-block - # qubitLabels = self.state_space.sole_tensor_product_block_labels - # smatrix, svector = _symp.embed_clifford(self.embedded_op.smatrix, - # self.embedded_op.svector, - # self.qubit_indices,len(qubitLabels)) - embedded_dense = self.embedded_op.to_dense(on_space) if on_space == 'minimal': # resolve 'minimal' based on embedded rep type on_space = 'Hilbert' if embedded_dense.shape[0] == self.embedded_op.state_space.udim else 'HilbertSchmidt' diff --git a/pygsti/modelmembers/operations/lindbladcoefficients.py b/pygsti/modelmembers/operations/lindbladcoefficients.py index cbfee77c2..5ac5fdc7c 100644 --- a/pygsti/modelmembers/operations/lindbladcoefficients.py +++ b/pygsti/modelmembers/operations/lindbladcoefficients.py @@ -850,7 +850,6 @@ def from_vector(self, v): else: raise ValueError("Internal error: invalid block type!") - #def paramvals_to_coefficients_deriv(self, parameter_values, cache_mx=None): def deriv_wrt_params(self, v=None): """ Construct derivative of Lindblad coefficients (for this block) from a set of parameter values. 
diff --git a/pygsti/modelmembers/operations/lindbladerrorgen.py b/pygsti/modelmembers/operations/lindbladerrorgen.py index bbf18ee93..ae2fba90c 100644 --- a/pygsti/modelmembers/operations/lindbladerrorgen.py +++ b/pygsti/modelmembers/operations/lindbladerrorgen.py @@ -658,30 +658,6 @@ def to_sparse(self, on_space='minimal'): else: # dense rep return _sps.csr_matrix(self.to_dense(on_space)) - #def torep(self): - # """ - # Return a "representation" object for this error generator. - # - # Such objects are primarily used internally by pyGSTi to compute - # things like probabilities more efficiently. - # - # Returns - # ------- - # OpRep - # """ - # if self._evotype == "densitymx": - # if self._rep_type == 'sparse superop': - # A = self.err_gen_mx - # return replib.DMOpRepSparse( - # _np.ascontiguousarray(A.data), - # _np.ascontiguousarray(A.indices, _np.int64), - # _np.ascontiguousarray(A.indptr, _np.int64)) - # else: - # return replib.DMOpRepDense(_np.ascontiguousarray(self.err_gen_mx, 'd')) - # else: - # raise NotImplementedError("torep(%s) not implemented for %s objects!" % - # (self._evotype, self.__class__.__name__)) - def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys=False): """ Get the `order`-th order Taylor-expansion terms of this operation. @@ -730,11 +706,6 @@ def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys= self._rep.Lterms, self._rep.Lterm_coeffs = self._init_terms(Lblocks, max_polynomial_vars) return self._rep.Lterms # terms with local-index polynomial coefficients - #def get_direct_order_terms(self, order): # , order_base=None - unused currently b/c order is always 0... - # v = self.to_vector() - # poly_terms = self.get_taylor_order_terms(order) - # return [ term.evaluate_coeff(v) for term in poly_terms ] - @property def total_term_magnitude(self): """ @@ -1223,56 +1194,6 @@ def transform_inplace(self, s): raise ValueError("Invalid transform for this LindbladErrorgen: type %s" % str(type(s))) - #I don't think this is ever needed - #def spam_transform_inplace(self, s, typ): - # """ - # Update operation matrix `O` with `inv(s) * O` OR `O * s`, depending on the value of `typ`. - # - # This functions as `transform_inplace(...)` but is used when this - # Lindblad-parameterized operation is used as a part of a SPAM - # vector. When `typ == "prep"`, the spam vector is assumed - # to be `rho = dot(self, )`, which transforms as - # `rho -> inv(s) * rho`, so `self -> inv(s) * self`. When - # `typ == "effect"`, `e.dag = dot(e.dag, self)` (not that - # `self` is NOT `self.dag` here), and `e.dag -> e.dag * s` - # so that `self -> self * s`. - # - # Parameters - # ---------- - # s : GaugeGroupElement - # A gauge group element which specifies the "s" matrix - # (and it's inverse) used in the above similarity transform. - # - # typ : { 'prep', 'effect' } - # Which type of SPAM vector is being transformed (see above). 
- # - # Returns - # ------- - # None - # """ - # assert(typ in ('prep', 'effect')), "Invalid `typ` argument: %s" % typ - # - # if isinstance(s, _gaugegroup.UnitaryGaugeGroupElement) or \ - # isinstance(s, _gaugegroup.TPSpamGaugeGroupElement): - # U = s.transform_matrix - # Uinv = s.transform_matrix_inverse - # err_gen_mx = self.to_sparse() if self._rep_type == 'sparse superop' else self.to_dense() - # - # #just act on postfactor and Lindbladian exponent: - # if typ == "prep": - # err_gen_mx = _mt.safe_dot(Uinv, err_gen_mx) - # else: - # err_gen_mx = _mt.safe_dot(err_gen_mx, U) - # - # self._set_params_from_matrix(err_gen_mx, truncate=True) - # self.dirty = True - # #Note: truncate=True above because some unitary transforms seem to - # ## modify eigenvalues to be negative beyond the tolerances - # ## checked when truncate == False. - # else: - # raise ValueError("Invalid transform for this LindbladDenseOp: type %s" - # % str(type(s))) - def deriv_wrt_params(self, wrt_filter=None): """ The element-wise derivative this operation. diff --git a/pygsti/modelmembers/operations/linearop.py b/pygsti/modelmembers/operations/linearop.py index 6eb9bbdd6..c86352a9f 100644 --- a/pygsti/modelmembers/operations/linearop.py +++ b/pygsti/modelmembers/operations/linearop.py @@ -128,28 +128,6 @@ def set_time(self, t): """ pass - #def rep_at_time(self, t): - # """ - # Retrieves a representation of this operator at time `t`. - # - # This is operationally equivalent to calling `self.set_time(t)` and - # then retrieving `self._rep`. However, what is returned from this function - # need not be the same rep object for different times, allowing the - # operator object to cache many reps for different times to increase performance - # (this avoids having to initialize the same rep at a given time). - # - # Parameters - # ---------- - # t : float - # The time. - # - # Returns - # ------- - # object - # """ - # self.set_time(t) - # return self._rep - def to_dense(self, on_space='minimal'): """ Return this operation as a dense matrix. diff --git a/pygsti/modelmembers/operations/repeatedop.py b/pygsti/modelmembers/operations/repeatedop.py index f5c21deed..888e1a95b 100644 --- a/pygsti/modelmembers/operations/repeatedop.py +++ b/pygsti/modelmembers/operations/repeatedop.py @@ -113,29 +113,6 @@ def to_dense(self, on_space='minimal'): op = self.repeated_op.to_dense(on_space) return _np.linalg.matrix_power(op, self.num_repetitions) - #def torep(self): - # """ - # Return a "representation" object for this operation. - # - # Such objects are primarily used internally by pyGSTi to compute - # things like probabilities more efficiently. - # - # Returns - # ------- - # OpRep - # """ - # if self._evotype == "densitymx": - # return replib.DMOpRepExponentiated(self.repeated_op.torep(), self.power, self.dim) - # elif self._evotype == "statevec": - # return replib.SVOpRepExponentiated(self.repeated_op.torep(), self.power, self.dim) - # elif self._evotype == "stabilizer": - # nQubits = int(round(_np.log2(self.dim))) # "stabilizer" is a unitary-evolution type mode - # return replib.SVOpRepExponentiated(self.repeated_op.torep(), self.power, nQubits) - # assert(False), "Invalid internal _evotype: %s" % self._evotype - - #FUTURE: term-related functions (maybe base off of ComposedOp or use a composedop to generate them?) - # e.g. 
ComposedOp([self.repeated_op] * power, dim, evotype) - @property def parameter_labels(self): """ diff --git a/pygsti/modelmembers/povms/basepovm.py b/pygsti/modelmembers/povms/basepovm.py index 4e4bd0ced..0ff25e937 100644 --- a/pygsti/modelmembers/povms/basepovm.py +++ b/pygsti/modelmembers/povms/basepovm.py @@ -167,40 +167,6 @@ def _from_memoized_dict(cls, mm_dict, serial_memo): for lbl, subm_serial_id in zip(mm_dict['effect_labels'], mm_dict['submembers'])} return cls(effects, mm_dict['evotype'], state_space) # Note: __init__ call signature of derived classes - #def _reset_member_gpindices(self): - # """ - # Sets gpindices for all non-complement items. Assumes all non-complement - # vectors have *independent* parameters (for now). - # """ - # Np = 0 - # for k, effect in self.items(): - # if k == self.complement_label: continue - # N = effect.num_params - # pslc = slice(Np, Np + N) - # if effect.gpindices != pslc: - # effect.set_gpindices(pslc, self) - # Np += N - # self.Np = Np - # - #def _rebuild_complement(self, identity_for_complement=None): - # """ Rebuild complement vector (in case other vectors have changed) """ - # - # if self.complement_label is not None and self.complement_label in self: - # non_comp_effects = [v for k, v in self.items() - # if k != self.complement_label] - # - # if identity_for_complement is None: - # identity_for_complement = self[self.complement_label].identity - # - # complement_effect = _ComplementPOVMEffect( - # identity_for_complement, non_comp_effects) - # complement_effect.set_gpindices(slice(0, self.Np), self) # all parameters - # - # #Assign new complement effect without calling our __setitem__ - # old_ro = self._readonly; self._readonly = False - # _POVM.__setitem__(self, self.complement_label, complement_effect) - # self._readonly = old_ro - def __setitem__(self, key, value): if not self._readonly: # when readonly == False, we're initializing return super(_BasePOVM, self).__setitem__(key, value) diff --git a/pygsti/modelmembers/povms/marginalizedpovm.py b/pygsti/modelmembers/povms/marginalizedpovm.py index a56595287..9d8dd3029 100644 --- a/pygsti/modelmembers/povms/marginalizedpovm.py +++ b/pygsti/modelmembers/povms/marginalizedpovm.py @@ -207,85 +207,6 @@ def __reduce__(self): self.sslbls_after_marginalizing), {'_gpindices': self._gpindices}) # preserve gpindices (but not parent) - #May need to implement this in future if we allow non-static MarginalizedPOVMs - #def allocate_gpindices(self, starting_index, parent, memo=None): - # """ - # Sets gpindices array for this object or any objects it - # contains (i.e. depends upon). Indices may be obtained - # from contained objects which have already been initialized - # (e.g. if a contained object is shared with other - # top-level objects), or given new indices starting with - # `starting_index`. - # - # Parameters - # ---------- - # starting_index : int - # The starting index for un-allocated parameters. - # - # parent : Model or ModelMember - # The parent whose parameter array gpindices references. - # - # memo : set, optional - # Used to prevent duplicate calls and self-referencing loops. If - # `memo` contains an object's id (`id(self)`) then this routine - # will exit immediately. - # - # Returns - # ------- - # num_new: int - # The number of *new* allocated parameters (so - # the parent should mark as allocated parameter - # indices `starting_index` to `starting_index + new_new`). 
- # """ - # if memo is None: memo = set() - # if id(self) in memo: return 0 - # memo.add(id(self)) - # - # assert(self.base_povm.num_params == 0) # so no need to do anything w/base_povm - # num_new_params = self.error_map.allocate_gpindices(starting_index, parent, memo) # *same* parent as self - # _mm.ModelMember.set_gpindices( - # self, self.error_map.gpindices, parent) - # return num_new_params - - #def relink_parent(self, parent): # Unnecessary? - # """ - # Sets the parent of this object *without* altering its gpindices. - # - # In addition to setting the parent of this object, this method - # sets the parent of any objects this object contains (i.e. - # depends upon) - much like allocate_gpindices. To ensure a valid - # parent is not overwritten, the existing parent *must be None* - # prior to this call. - # """ - # self.povm_to_marginalize.relink_parent(parent) - # _mm.ModelMember.relink_parent(self, parent) - - #def set_gpindices(self, gpindices, parent, memo=None): - # """ - # Set the parent and indices into the parent's parameter vector that - # are used by this ModelMember object. - # - # Parameters - # ---------- - # gpindices : slice or integer ndarray - # The indices of this objects parameters in its parent's array. - # - # parent : Model or ModelMember - # The parent whose parameter array gpindices references. - # - # Returns - # ------- - # None - # """ - # if memo is None: memo = set() - # elif id(self) in memo: return - # memo.add(id(self)) - # - # assert(self.base_povm.num_params == 0) # so no need to do anything w/base_povm - # self.error_map.set_gpindices(gpindices, parent, memo) - # self.terms = {} # clear terms cache since param indices have changed now - # _mm.ModelMember._set_only_my_gpindices(self, gpindices, parent) - def simplify_effects(self, prefix=""): """ Creates a dictionary of simplified effect vectors. diff --git a/pygsti/modelmembers/states/fullpurestate.py b/pygsti/modelmembers/states/fullpurestate.py index 771ebd81f..a3da3cc5e 100644 --- a/pygsti/modelmembers/states/fullpurestate.py +++ b/pygsti/modelmembers/states/fullpurestate.py @@ -44,30 +44,6 @@ def __init__(self, purevec, basis="pp", evotype="default", state_space=None): self._paramlbls = _np.array(["VecElement Re(%d)" % i for i in range(self.state_space.udim)] + ["VecElement Im(%d)" % i for i in range(self.state_space.udim)], dtype=object) - #REMOVE (Cannot set to arbitrary vector) - but maybe could set to pure vector? - #def set_dense(self, vec): - # """ - # Set the dense-vector value of this SPAM vector. - # - # Attempts to modify this SPAM vector's parameters so that the raw - # SPAM vector becomes `vec`. Will raise ValueError if this operation - # is not possible. - # - # Parameters - # ---------- - # vec : array_like or State - # A numpy array representing a SPAM vector, or a State object. 
- # - # Returns - # ------- - # None - # """ - # vec = State._to_vector(vec) - # if(vec.size != self.dim): - # raise ValueError("Argument must be length %d" % self.dim) - # self._ptr[:] = vec - # self.dirty = True - @property def num_params(self): """ diff --git a/pygsti/modelmembers/term.py b/pygsti/modelmembers/term.py index b0ca406a6..2de25a3b1 100644 --- a/pygsti/modelmembers/term.py +++ b/pygsti/modelmembers/term.py @@ -272,16 +272,6 @@ def __mul__(self, x): def __rmul__(self, x): return self.__mul__(x) - #Not needed - but we would use this if we changed - # the "effect term" convention so that the pre/post ops - # were associated with the pre/post effect vector and - # not vice versa (right now the post effect is preceded - # by the *pre* ops, and vice versa). If the reverse - # were true we'd need to conjugate the terms created - # for ComposedPOVMEffect objects, for example. - #def conjugate(self): - # return self.__class__(self._rep.conjugate()) - class _HasMagnitude(object): """ @@ -718,9 +708,6 @@ def coeff(self): """ return _Polynomial.from_rep(self._rep.coeff) - #def _coeff_copy(self): - # return self.coeff.copy() - def map_indices_inplace(self, mapfn): """ Performs a bulk find & replace on the coefficient polynomial's variable indices. diff --git a/pygsti/models/explicitmodel.py b/pygsti/models/explicitmodel.py index 2faa9c955..bae28bbd8 100644 --- a/pygsti/models/explicitmodel.py +++ b/pygsti/models/explicitmodel.py @@ -186,16 +186,6 @@ def _excalc(self): return _explicitcalc.ExplicitOpModelCalc(self.state_space.dim, simplified_preps, simplified_ops, simplified_effects, self.num_params, self._param_interposer) - #Unneeded - just use string processing & rely on effect labels *not* having underscores in them - #def simplify_spamtuple_to_outcome_label(self, simplified_spamTuple): - # #TODO: make this more efficient (prep lbl isn't even used!) - # for prep_lbl in self.preps: - # for povm_lbl in self.povms: - # for elbl in self.povms[povm_lbl]: - # if simplified_spamTuple == (prep_lbl, povm_lbl + "_" + elbl): - # return (elbl,) # outcome "label" (a tuple) - # raise ValueError("No outcome label found for simplified spam_tuple: ", simplified_spamTuple) - def _embed_operation(self, op_target_labels, op_val, force=False): """ Called by OrderedMemberDict._auto_embed to create an embedded-gate diff --git a/pygsti/models/memberdict.py b/pygsti/models/memberdict.py index 833c389b0..59f923d88 100644 --- a/pygsti/models/memberdict.py +++ b/pygsti/models/memberdict.py @@ -45,18 +45,6 @@ def __setitem__(self, key, val): "beginning with the prefix '%s'" % self._prefix) super(_PrefixOrderedDict, self).__setitem__(key, val) - #Handled by derived classes - #def __reduce__(self): - # items = [(k,v) for k,v in self.iteritems()] - # return (_PrefixOrderedDict, (self._prefix, items), None) - - """ - An ordered dictionary whose keys must begin with a given prefix, - and which holds LinearOperator objects. This class ensures that every value is a - :class:`LinearOperator`-derived object by converting any non-`LinearOperator` values into - `LinearOperator`s upon assignment and raising an error if this is not possible. 
- """ - class OrderedMemberDict(_PrefixOrderedDict, _mm.ModelChild): """ diff --git a/pygsti/objectivefns/objectivefns.py b/pygsti/objectivefns/objectivefns.py index 208bdb46d..f2a19b9f6 100644 --- a/pygsti/objectivefns/objectivefns.py +++ b/pygsti/objectivefns/objectivefns.py @@ -112,9 +112,6 @@ def _objfn(objfn_cls, model, dataset, circuits=None, return ofn - #def __len__(self): - # return len(self.circuits) - class ObjectiveFunctionBuilder(_NicelySerializable): """ @@ -1461,96 +1458,6 @@ def approximate_hessian(self, paramvec=None): """ raise NotImplementedError("Derived classes should implement this!") - #MOVED - but these versions have updated names - #def _persistent_memory_estimate(self, num_elements=None): - # # Estimate & check persistent memory (from allocs within objective function) - # """ - # Compute the amount of memory needed to perform evaluations of this objective function. - # - # This number includes both intermediate and final results, and assumes - # that the types of evauations given by :meth:`_evaltree_subcalls` - # are required. - # - # Parameters - # ---------- - # num_elements : int, optional - # The number of elements (circuit outcomes) that will be computed. - # - # Returns - # ------- - # int - # """ - # if num_elements is None: - # nout = int(round(_np.sqrt(self.mdl.dim))) # estimate of avg number of outcomes per string - # nc = len(self.circuits) - # ne = nc * nout # estimate of the number of elements (e.g. probabilities, # LS terms, etc) to compute - # else: - # ne = num_elements - # np = self.mdl.num_params - # - # # "persistent" memory is that used to store the final results. - # obj_fn_mem = FLOATSIZE * ne - # jac_mem = FLOATSIZE * ne * np - # hess_mem = FLOATSIZE * ne * np**2 - # persistent_mem = 4 * obj_fn_mem + jac_mem # 4 different objective-function sized arrays, 1 jacobian array? - # if any([nm == "bulk_fill_hprobs" for nm in self._evaltree_subcalls()]): - # persistent_mem += hess_mem # we need room for the hessian too! - # # TODO: what about "bulk_hprobs_by_block"? - # - # return persistent_mem - # - #def _evaltree_subcalls(self): - # """ - # The types of calls that will be made to an evaluation tree. - # - # This information is used for memory estimation purposes. - # - # Returns - # ------- - # list - # """ - # calls = ["bulk_fill_probs", "bulk_fill_dprobs"] - # if self.enable_hessian: calls.append("bulk_fill_hprobs") - # return calls - # - #def num_data_params(self): - # """ - # The number of degrees of freedom in the data used by this objective function. 
- # - # Returns - # ------- - # int - # """ - # return self.dataset.degrees_of_freedom(self.ds_circuits, - # aggregate_times=not self.time_dependent) - - #def _precompute_omitted_freqs(self): - # """ - # Detect omitted frequences (assumed to be 0) so we can compute objective fn correctly - # """ - # self.firsts = []; self.indicesOfCircuitsWithOmittedData = [] - # for i, c in enumerate(self.circuits): - # lklen = _slct.length(self.lookup[i]) - # if 0 < lklen < self.mdl.compute_num_outcomes(c): - # self.firsts.append(_slct.to_array(self.lookup[i])[0]) - # self.indicesOfCircuitsWithOmittedData.append(i) - # if len(self.firsts) > 0: - # self.firsts = _np.array(self.firsts, 'i') - # self.indicesOfCircuitsWithOmittedData = _np.array(self.indicesOfCircuitsWithOmittedData, 'i') - # self.dprobs_omitted_rowsum = _np.empty((len(self.firsts), self.nparams), 'd') - # self.raw_objfn.printer.log("SPARSE DATA: %d of %d rows have sparse data" % - # (len(self.firsts), len(self.circuits))) - # else: - # self.firsts = None # no omitted probs - # - #def _compute_count_vectors(self): - # """ - # Ensure self.cache contains count and total-count vectors. - # """ - # if not self.cache.has_count_vectors(): - # self.cache.add_count_vectors(self.dataset, self.ds_circuits, self.circuit_weights) - # return self.cache.counts, self.cache.total_counts - def _construct_hessian(self, counts, total_counts, prob_clip_interval): """ Framework for constructing a hessian matrix row by row using a derived @@ -6499,10 +6406,6 @@ def __init__(self, logl_objective_fn, base_pt, wildcard): self.logl_objfn.resource_alloc.add_tracked_memory(self.logl_objfn.probs.size) self.probs = self.logl_objfn.probs.copy() - #def _default_evalpt(self): - # """The default point to evaluate functions at """ - # return self.wildcard_budget.to_vector() - #Mimic the underlying LogL objective def __getattr__(self, attr): return getattr(self.__dict__['logl_objfn'], attr) # use __dict__ so no chance for recursive __getattr__ diff --git a/pygsti/objectivefns/wildcardbudget.py b/pygsti/objectivefns/wildcardbudget.py index f036b1590..49e62ea53 100644 --- a/pygsti/objectivefns/wildcardbudget.py +++ b/pygsti/objectivefns/wildcardbudget.py @@ -143,11 +143,6 @@ def description(self): """ raise NotImplementedError("Derived classes must implement `description`") - #def compute_circuit_wildcard_budget(c, w_vec): - # #raise NotImplementedError("TODO!!!") - # #for now, assume w_vec is a length-1 vector - # return abs(w_vec[0]) * len(c) - def precompute_for_same_circuits(self, circuits): """ Compute a pre-computed quantity for speeding up circuit calculations. 
diff --git a/pygsti/optimize/customcg.py b/pygsti/optimize/customcg.py index de28fb304..a89f70d56 100644 --- a/pygsti/optimize/customcg.py +++ b/pygsti/optimize/customcg.py @@ -244,15 +244,3 @@ def _finite_diff_dfdx_and_bdflag(f, x, delta): #completely undefined return dfdx, bd - -#def f6(param): -# '''Schaffer's F6 function''' -# para = param*10 -# para = param[0:2] -# num = (sin(sqrt((para[0] * para[0]) + (para[1] * para[1])))) * \ -# (sin(sqrt((para[0] * para[0]) + (para[1] * para[1])))) - 0.5 -# denom = (1.0 + 0.001 * ((para[0] * para[0]) + (para[1] * para[1]))) * \ -# (1.0 + 0.001 * ((para[0] * para[0]) + (para[1] * para[1]))) -# f6 = 0.5 - (num/denom) -# errorf6 = 1 - f6 -# return f6, errorf6; diff --git a/pygsti/optimize/customlm.py b/pygsti/optimize/customlm.py index cbaa9b513..89b749a3a 100644 --- a/pygsti/optimize/customlm.py +++ b/pygsti/optimize/customlm.py @@ -895,7 +895,6 @@ def dclip(ar): return ar reject_msg = "" if profiler: profiler.memory_check("custom_leastsq: after linsolve") if success: # linear solve succeeded - #dx = _hack_dx(obj_fn, x, dx, Jac, JTJ, JTf, f, norm_f) if damping_mode != 'adaptive': new_x[:] = x + dx @@ -1315,239 +1314,127 @@ def dclip(ar): return ar #return solution -def _hack_dx(obj_fn, x, dx, jac, jtj, jtf, f, norm_f): - #HACK1 - #if nRejects >= 2: - # dx = -(10.0**(1-nRejects))*x - # print("HACK - setting dx = -%gx!" % 10.0**(1-nRejects)) - # return dx - - #HACK2 - if True: - print("HACK2 - trying to find a good dx by iteratively stepping in each direction...") - - test_f = obj_fn(x + dx); cmp_normf = _np.dot(test_f, test_f) - print("Compare with suggested step => ", cmp_normf) - STEP = 0.0001 - - #import bpdb; bpdb.set_trace() - #gradient = -jtf - test_dx = _np.zeros(len(dx), 'd') - last_normf = norm_f - for ii in range(len(dx)): - - #Try adding - while True: - test_dx[ii] += STEP - test_f = obj_fn(x + test_dx); test_normf = _np.dot(test_f, test_f) - if test_normf < last_normf: - last_normf = test_normf +""" +def custom_leastsq_wikip(obj_fn, jac_fn, x0, f_norm_tol=1e-6, jac_norm_tol=1e-6, + rel_tol=1e-6, max_iter=100, comm=None, verbosity=0, profiler=None): + # + # Wikipedia-version of LM algorithm, testing mu and mu/nu damping params and taking + # mu/nu => new_mu if acceptable... This didn't seem to perform well, but maybe just + # needs some tweaking, so leaving it commented here for reference + # + msg = "" + converged = False + x = x0 + f = obj_fn(x) + norm_f = _np.linalg.norm(f) + tau = 1e-3 #initial mu + nu = 1.3 + my_cols_slice = None + + + if not _np.isfinite(norm_f): + msg = "Infinite norm of objective function at initial point!" 
+ + for k in range(max_iter): #outer loop + # assume x, f, fnorm hold valid values + + if len(msg) > 0: + break #exit outer loop if an exit-message has been set + + if norm_f < f_norm_tol: + msg = "norm(objectivefn) is small" + converged = True; break + + if verbosity > 0: + print("--- Outer Iter %d: norm_f = %g" % (k,norm_f)) + + if profiler: profiler.mem_check("custom_leastsq: begin outer iter *before de-alloc*") + jac = None; jtj = None; jtf = None + + if profiler: profiler.mem_check("custom_leastsq: begin outer iter") + jac = jac_fn(x) + if profiler: profiler.mem_check("custom_leastsq: after jacobian:" + + "shape=%s, GB=%.2f" % (str(jac.shape), + jac.nbytes/(1024.0**3)) ) + + tm = _time.time() + if my_cols_slice is None: + my_cols_slice = _mpit.distribute_for_dot(jac.shape[0], comm) + jtj = _mpit.mpidot(jac.T,jac,my_cols_slice,comm) #_np.dot(jac.T,jac) + jtf = _np.dot(jac.T,f) + if profiler: profiler.add_time("custom_leastsq: dotprods",tm) + + idiag = _np.diag_indices_from(jtj) + norm_JTf = _np.linalg.norm(jtf) #, ord='inf') + norm_x = _np.linalg.norm(x) + undampled_JTJ_diag = jtj.diagonal().copy() + + if norm_JTf < jac_norm_tol: + msg = "norm(jacobian) is small" + converged = True; break + + if k == 0: + mu = tau #* _np.max(undampled_JTJ_diag) # initial damping element + #mu = tau #* _np.max(undampled_JTJ_diag) # initial damping element + + #determing increment using adaptive damping + while True: #inner loop + + ### Evaluate with mu' = mu / nu + mu = mu / nu + if profiler: profiler.mem_check("custom_leastsq: begin inner iter") + jtj[idiag] *= (1.0 + mu) # augment normal equations + #jtj[idiag] += mu # augment normal equations + + try: + if profiler: profiler.mem_check("custom_leastsq: before linsolve") + tm = _time.time() + success = True + dx = _np.linalg.solve(jtj, -jtf) + if profiler: profiler.add_time("custom_leastsq: linsolve",tm) + except _np.linalg.LinAlgError: + success = False + + if profiler: profiler.mem_check("custom_leastsq: after linsolve") + if success: #linear solve succeeded + new_x = x + dx + norm_dx = _np.linalg.norm(dx) + + #if verbosity > 1: + # print("--- Inner Loop: mu=%g, norm_dx=%g" % (mu,norm_dx)) + + if norm_dx < rel_tol*norm_x: #use squared qtys instead (speed)? + msg = "relative change in x is small" + converged = True; break + + if norm_dx > (norm_x+rel_tol)/_MACH_PRECISION: + msg = "(near-)singular linear system"; break + + new_f = obj_fn(new_x) + if profiler: profiler.mem_check("custom_leastsq: after obj_fn") + norm_new_f = _np.linalg.norm(new_f) + if not _np.isfinite(norm_new_f): # avoid infinite loop... 
+ msg = "Infinite norm of objective function!"; break + + dF = norm_f - norm_new_f + if dF > 0: #accept step + #print(" Accepted!") + x,f, norm_f = new_x, new_f, norm_new_f + nu = 1.3 + break # exit inner loop normally else: - test_dx[ii] -= STEP - break - - if test_dx[ii] == 0: # then try subtracting - while True: - test_dx[ii] -= STEP - test_f = obj_fn(x + test_dx); test_normf = _np.dot(test_f, test_f) - if test_normf < last_normf: - last_normf = test_normf - else: - test_dx[ii] += STEP - break - - if abs(test_dx[ii]) > 1e-6: - test_prediction = norm_f + _np.dot(-2 * jtf, test_dx) - tp2_f = f + _np.dot(jac, test_dx) - test_prediction2 = _np.dot(tp2_f, tp2_f) - cmp_dx = dx # -jtf - print(" -> Adjusting index ", ii, ":", x[ii], "+", test_dx[ii], " => ", last_normf, "(cmp w/dx: ", - cmp_dx[ii], test_prediction, test_prediction2, ") ", - "YES" if test_dx[ii] * cmp_dx[ii] > 0 else "NO") - - if _np.linalg.norm(test_dx) > 0 and last_normf < cmp_normf: - print("FOUND HACK dx w/norm = ", _np.linalg.norm(test_dx)) - return test_dx - else: - print("KEEPING ORIGINAL dx") - - #HACK3 - if False: - print("HACK3 - checking if there's a simple dx that is better...") - test_f = obj_fn(x + dx); cmp_normf = _np.dot(test_f, test_f) - orig_prediction = norm_f + _np.dot(2 * jtf, dx) - Jdx = _np.dot(jac, dx) - op2_f = f + Jdx - orig_prediction2 = _np.dot(op2_f, op2_f) - # main objective = fT*f = norm_f - # at new x => (f+J*dx)T * (f+J*dx) = norm_f + JdxT*f + fT*Jdx - # = norm_f + 2*(fT*J)dx (b/c transpose of real# does nothing) - # = norm_f + 2*dxT*(JT*f) - # prediction 2 also includes (J*dx)T * (J*dx) term = dxT * (jtj) * dx - orig_prediction3 = orig_prediction + _np.dot(Jdx, Jdx) - norm_dx = _np.linalg.norm(dx) - print("Compare with suggested |dx| = ", norm_dx, " => ", cmp_normf, - "(predicted: ", orig_prediction, orig_prediction2, orig_prediction3) - STEP = norm_dx # 0.0001 - - #import bpdb; bpdb.set_trace() - test_dx = _np.zeros(len(dx), 'd') - best_ii = -1; best_normf = norm_f; best_dx = 0 - for ii in range(len(dx)): - - #Try adding a small amount - test_dx[ii] = STEP - test_f = obj_fn(x + test_dx); test_normf = _np.dot(test_f, test_f) - if test_normf < best_normf: - best_normf = test_normf - best_dx = STEP - best_ii = ii + mu *= nu #increase mu else: - test_dx[ii] = -STEP - test_f = obj_fn(x + test_dx); test_normf = _np.dot(test_f, test_f) - if test_normf < best_normf: - best_normf = test_normf - best_dx = -STEP - best_ii = ii - test_dx[ii] = 0 - - test_dx[best_ii] = best_dx - test_prediction = norm_f + _np.dot(2 * jtf, test_dx) - tp2_f = f + _np.dot(jac, test_dx) - test_prediction2 = _np.dot(tp2_f, tp2_f) - - jj = _np.argmax(_np.abs(dx)) - print("Best decrease = index", best_ii, ":", x[best_ii], '+', best_dx, "==>", - best_normf, " (predictions: ", test_prediction, test_prediction2, ")") - print(" compare with original dx[", best_ii, "]=", dx[best_ii], - "YES" if test_dx[best_ii] * dx[best_ii] > 0 else "NO") - print(" max of abs(dx) is index ", jj, ":", dx[jj], "yes" if jj == best_ii else "no") - - if _np.linalg.norm(test_dx) > 0 and best_normf < cmp_normf: - print("FOUND HACK dx w/norm = ", _np.linalg.norm(test_dx)) - return test_dx - else: - print("KEEPING ORIGINAL dx") - return dx - - -#Wikipedia-version of LM algorithm, testing mu and mu/nu damping params and taking -# mu/nu => new_mu if acceptable... 
This didn't seem to perform well, but maybe just -# needs some tweaking, so leaving it commented here for reference -#def custom_leastsq_wikip(obj_fn, jac_fn, x0, f_norm_tol=1e-6, jac_norm_tol=1e-6, -# rel_tol=1e-6, max_iter=100, comm=None, verbosity=0, profiler=None): -# msg = "" -# converged = False -# x = x0 -# f = obj_fn(x) -# norm_f = _np.linalg.norm(f) -# tau = 1e-3 #initial mu -# nu = 1.3 -# my_cols_slice = None -# -# -# if not _np.isfinite(norm_f): -# msg = "Infinite norm of objective function at initial point!" -# -# for k in range(max_iter): #outer loop -# # assume x, f, fnorm hold valid values -# -# if len(msg) > 0: -# break #exit outer loop if an exit-message has been set -# -# if norm_f < f_norm_tol: -# msg = "norm(objectivefn) is small" -# converged = True; break -# -# if verbosity > 0: -# print("--- Outer Iter %d: norm_f = %g" % (k,norm_f)) -# -# if profiler: profiler.mem_check("custom_leastsq: begin outer iter *before de-alloc*") -# jac = None; jtj = None; jtf = None -# -# if profiler: profiler.mem_check("custom_leastsq: begin outer iter") -# jac = jac_fn(x) -# if profiler: profiler.mem_check("custom_leastsq: after jacobian:" -# + "shape=%s, GB=%.2f" % (str(jac.shape), -# jac.nbytes/(1024.0**3)) ) -# -# tm = _time.time() -# if my_cols_slice is None: -# my_cols_slice = _mpit.distribute_for_dot(jac.shape[0], comm) -# jtj = _mpit.mpidot(jac.T,jac,my_cols_slice,comm) #_np.dot(jac.T,jac) -# jtf = _np.dot(jac.T,f) -# if profiler: profiler.add_time("custom_leastsq: dotprods",tm) -# -# idiag = _np.diag_indices_from(jtj) -# norm_JTf = _np.linalg.norm(jtf) #, ord='inf') -# norm_x = _np.linalg.norm(x) -# undampled_JTJ_diag = jtj.diagonal().copy() -# -# if norm_JTf < jac_norm_tol: -# msg = "norm(jacobian) is small" -# converged = True; break -# -# if k == 0: -# mu = tau #* _np.max(undampled_JTJ_diag) # initial damping element -# #mu = tau #* _np.max(undampled_JTJ_diag) # initial damping element -# -# #determing increment using adaptive damping -# while True: #inner loop -# -# ### Evaluate with mu' = mu / nu -# mu = mu / nu -# if profiler: profiler.mem_check("custom_leastsq: begin inner iter") -# jtj[idiag] *= (1.0 + mu) # augment normal equations -# #jtj[idiag] += mu # augment normal equations -# -# try: -# if profiler: profiler.mem_check("custom_leastsq: before linsolve") -# tm = _time.time() -# success = True -# dx = _np.linalg.solve(jtj, -jtf) -# if profiler: profiler.add_time("custom_leastsq: linsolve",tm) -# except _np.linalg.LinAlgError: -# success = False -# -# if profiler: profiler.mem_check("custom_leastsq: after linsolve") -# if success: #linear solve succeeded -# new_x = x + dx -# norm_dx = _np.linalg.norm(dx) -# -# #if verbosity > 1: -# # print("--- Inner Loop: mu=%g, norm_dx=%g" % (mu,norm_dx)) -# -# if norm_dx < rel_tol*norm_x: #use squared qtys instead (speed)? -# msg = "relative change in x is small" -# converged = True; break -# -# if norm_dx > (norm_x+rel_tol)/_MACH_PRECISION: -# msg = "(near-)singular linear system"; break -# -# new_f = obj_fn(new_x) -# if profiler: profiler.mem_check("custom_leastsq: after obj_fn") -# norm_new_f = _np.linalg.norm(new_f) -# if not _np.isfinite(norm_new_f): # avoid infinite loop... 
-# msg = "Infinite norm of objective function!"; break -# -# dF = norm_f - norm_new_f -# if dF > 0: #accept step -# #print(" Accepted!") -# x,f, norm_f = new_x, new_f, norm_new_f -# nu = 1.3 -# break # exit inner loop normally -# else: -# mu *= nu #increase mu -# else: -# #Linear solve failed: -# mu *= nu #increase mu -# nu = 2*nu -# -# jtj[idiag] = undampled_JTJ_diag #restore diagonal for next inner loop iter -# #end of inner loop -# #end of outer loop -# else: -# #if no break stmt hit, then we've exceeded max_iter -# msg = "Maximum iterations (%d) exceeded" % max_iter -# -# return x, converged, msg + #Linear solve failed: + mu *= nu #increase mu + nu = 2*nu + + jtj[idiag] = undampled_JTJ_diag #restore diagonal for next inner loop iter + #end of inner loop + #end of outer loop + else: + #if no break stmt hit, then we've exceeded max_iter + msg = "Maximum iterations (%d) exceeded" % max_iter + + return x, converged, msg +""" diff --git a/pygsti/optimize/optimize.py b/pygsti/optimize/optimize.py index fcb0835f0..7411eb4d1 100644 --- a/pygsti/optimize/optimize.py +++ b/pygsti/optimize/optimize.py @@ -662,90 +662,6 @@ def _evaluate(individual): return solution -#def fmin_homebrew(f, x0, maxiter): -# """ -# Cooked up by Erik, this algorithm is similar to basinhopping but with some tweaks. -# -# Parameters -# ---------- -# fn : function -# The function to minimize. -# -# x0 : numpy array -# The starting point (argument to fn). -# -# maxiter : int -# The maximum number of iterations. -# -# Returns -# ------- -# scipy.optimize.Result object -# Includes members 'x', 'fun', 'success', and 'message'. -# """ -# -# STEP = 0.01 -# MAX_STEPS = int(2.0 / STEP) # allow a change of at most 2.0 -# MAX_DIR_TRIES = 1000 -# T = 1.0 -# -# global_best_params = cur_x0 = x0 -# global_best = cur_f = f(x0) -# N = len(x0) -# trial_x0 = x0.copy() -# -# for it in range(maxiter): -# -# #Minimize using L-BFGS-B -# opts = {'maxiter': maxiter, 'maxfev': maxiter, 'disp': False } -# soln = _spo.minimize(f,trial_x0,options=opts, method='L-BFGS-B',callback=None, tol=1e-8) -# -# # Update global best -# if soln.fun < global_best: -# global_best_params = soln.x -# global_best = soln.fun -# -# #check if we accept the new minimum -# if soln.fun < cur_f or _np.random.random() < _np.exp( -(soln.fun - cur_f)/T ): -# cur_x0 = soln.x; cur_f = soln.fun -# print "Iter %d: f=%g accepted -- global best = %g" % (it, cur_f, global_best) -# else: -# print "Iter %d: f=%g declined" % (it, cur_f) -# -# trial_x0 = None; numTries = 0 -# while trial_x0 is None and numTries < MAX_DIR_TRIES: -# #choose a random direction -# direction = _np.random.random( N ) -# numTries += 1 -# -# #print "DB: test dir %d" % numTries #DEBUG -# -# #kick solution along random direction until the value of f starts to get smaller again (if it ever does) -# # (this indicates we've gone over a maximum along this direction) -# last_f = cur_f -# for i in range(1,MAX_STEPS): -# test_x = cur_x0 + i*STEP * direction -# test_f = f(test_x) -# #print "DB: test step=%f: f=%f" % (i*STEP, test_f) -# if test_f < last_f: -# trial_x0 = test_x -# print "Found new direction in %d tries, new f(x0) = %g" % (numTries,test_f) -# break -# last_f = test_f -# -# if trial_x0 is None: -# raise ValueError("Maximum number of direction tries exceeded") -# -# solution = _optResult() -# solution.x = global_best_params; solution.fun = global_best -# solution.success = True -## if it < maxiter: -## solution.success = True -## else: -## solution.success = False -## solution.message = "Maximum iterations 
exceeded" -# return solution - - def create_objfn_printer(obj_func, start_time=None): """ Create a callback function that prints the value of an objective function. diff --git a/pygsti/optimize/wildcardopt.py b/pygsti/optimize/wildcardopt.py index 2fc5880d6..f2d794d38 100644 --- a/pygsti/optimize/wildcardopt.py +++ b/pygsti/optimize/wildcardopt.py @@ -67,19 +67,6 @@ def _wildcard_fit_criteria(wv): return max(0, two_dlogl - two_dlogl_threshold) + percircuit_penalty - ##For debugging wildcard (see below for suggested insertion point) - #def _wildcard_fit_criteria_debug(wv): - # dlogl_elements = logl_wildcard_fn.lsvec(wv)**2 # b/c WC fn only has sqrt of terms implemented now - # for i in range(num_circuits): - # dlogl_percircuit[i] = _np.sum(dlogl_elements[layout.indices_for_index(i)], axis=0) - # two_dlogl_percircuit = 2 * dlogl_percircuit - # two_dlogl = sum(two_dlogl_percircuit) - # print("Aggregate penalty = ", two_dlogl, "-", two_dlogl_threshold, "=", two_dlogl - two_dlogl_threshold) - # print("Per-circuit (redbox) penalty = ", sum(_np.clip(two_dlogl_percircuit - redbox_threshold, 0, None))) - # print(" per-circuit threshold = ", redbox_threshold, " highest violators = ") - # sorted_percircuit = sorted(enumerate(two_dlogl_percircuit), key=lambda x: x[1], reverse=True) - # print('\n'.join(["(%d) %s: %g" % (i, layout.circuits[i].str, val) for i, val in sorted_percircuit[0:10]])) - num_iters = 0 wvec_init = budget.to_vector() @@ -541,13 +528,6 @@ def NewtonObjective_derivs(x): Hobj = t * _np.diag(-1.0 / (sqrtVec**3) * (c**2 * x)**2 + c**2 / sqrtVec) + Hbarrier return obj, Dobj, Hobj - #import scipy.optimize - #def barrier_obj(x): - # x = _np.clip(x, 1e-10, None) - # return t * _np.dot(c.T, x) - _np.log(-barrierF(x, False)) - #result = scipy.optimize.minimize(barrier_obj, x, method="CG") - #x = _np.clip(result.x, 0, None) - x, debug_x_list = NewtonSolve(x, NewtonObjective, NewtonObjective_derivs, tol, max_iters, printer - 1) #x, debug_x_list = NewtonSolve(x, NewtonObjective, None, tol, max_iters, printer - 1) # use finite-diff derivs diff --git a/pygsti/protocols/estimate.py b/pygsti/protocols/estimate.py index b478de2a3..897549ae1 100644 --- a/pygsti/protocols/estimate.py +++ b/pygsti/protocols/estimate.py @@ -87,7 +87,6 @@ def from_dir(cls, dirname, quick_load=False): @classmethod def _create_obj_from_doc_and_mongodb(cls, doc, mongodb, quick_load=False): - #def from_mongodb(cls, mongodb_collection, doc_id, ): ret = cls.__new__(cls) _MongoSerializable.__init__(ret, doc.get('_id', None)) ret.__dict__.update(_io.read_auxtree_from_mongodb_doc(mongodb, doc, 'auxfile_types', quick_load=quick_load)) diff --git a/pygsti/protocols/gst.py b/pygsti/protocols/gst.py index 9255943d3..f529f4de8 100644 --- a/pygsti/protocols/gst.py +++ b/pygsti/protocols/gst.py @@ -1770,11 +1770,6 @@ def __init__(self, modes=('full TP','CPTPLND','Target'), gaugeopt_suite='stdgaug #Advanced options that could be changed by users who know what they're doing self.starting_point = {} # a dict whose keys are modes - #def run_using_germs_and_fiducials(self, dataset, target_model, prep_fiducials, meas_fiducials, germs, max_lengths): - # design = StandardGSTDesign(target_model, prep_fiducials, meas_fiducials, germs, max_lengths) - # data = _proto.ProtocolData(design, dataset) - # return self.run(data) - def run(self, data, memlimit=None, comm=None, checkpoint=None, checkpoint_path=None, disable_checkpointing=False, simulator: Optional[ForwardSimulator.Castable]=None): """ diff --git a/pygsti/protocols/modeltest.py 
b/pygsti/protocols/modeltest.py index b29b1b735..d34769624 100644 --- a/pygsti/protocols/modeltest.py +++ b/pygsti/protocols/modeltest.py @@ -128,12 +128,6 @@ def __init__(self, model_to_test, target_model=None, gaugeopt_suite=None, self.circuit_weights = None self.unreliable_ops = ('Gcnot', 'Gcphase', 'Gms', 'Gcn', 'Gcx', 'Gcz') - #def run_using_germs_and_fiducials(self, model, dataset, target_model, prep_fiducials, - # meas_fiducials, germs, maxLengths): - # from .gst import StandardGSTDesign as _StandardGSTDesign - # design = _StandardGSTDesign(target_model, prep_fiducials, meas_fiducials, germs, maxLengths) - # return self.run(_proto.ProtocolData(design, dataset)) - def run(self, data, memlimit=None, comm=None, checkpoint=None, checkpoint_path=None, disable_checkpointing=False, simulator: Optional[ForwardSimulator.Castable]=None): """ diff --git a/pygsti/protocols/protocol.py b/pygsti/protocols/protocol.py index 28c2459c3..0ef5cd808 100644 --- a/pygsti/protocols/protocol.py +++ b/pygsti/protocols/protocol.py @@ -1464,12 +1464,6 @@ def from_edesign(cls, edesign): else: raise ValueError("Cannot convert a %s to a %s!" % (str(type(edesign)), str(cls))) - #@classmethod - #def from_tensored_circuits(cls, circuits, template_edesign, qubit_labels_per_edesign): - # pass #Useful??? - need to break each circuit into different parts - # based on qubits, then copy (?) template edesign and just replace itself - # all_circuits_needing_data member? - def __init__(self, edesigns, tensored_circuits=None, qubit_labels=None): """ Create a new SimultaneousExperimentDesign object. @@ -1959,9 +1953,6 @@ def is_multipass(self): """ return isinstance(self.dataset, (_data.MultiDataSet, dict)) - #def underlying_tree_paths(self): - # return self.edesign.get_tree_paths() - def prune_tree(self, paths, paths_are_sorted=False): """ Prune the tree rooted here to include only the given paths, discarding all else. diff --git a/pygsti/report/factory.py b/pygsti/report/factory.py index 7d97fcd11..8d2f675d7 100644 --- a/pygsti/report/factory.py +++ b/pygsti/report/factory.py @@ -84,21 +84,6 @@ def _add_lbl(lst, lbl): return running_lbls -#def _robust_estimate_has_same_models(estimates, est_lbl): -# lbl_robust = est_lbl+ROBUST_SUFFIX -# if lbl_robust not in estimates: return False #no robust estimate -# -# for mdl_lbl in list(estimates[est_lbl].goparameters.keys()) \ -# + ['final iteration estimate']: -# if mdl_lbl not in estimates[lbl_robust].models: -# return False #robust estimate is missing mdl_lbl! -# -# mdl = estimates[lbl_robust].models[mdl_lbl] -# if estimates[est_lbl].models[mdl_lbl].frobeniusdist(mdl) > 1e-8: -# return False #model mismatch! 
-# -# return True - def _get_viewable_crf(est, est_lbl, mdl_lbl, verbosity=0): printer = _VerbosityPrinter.create_printer(verbosity) diff --git a/pygsti/report/fogidiagram.py b/pygsti/report/fogidiagram.py index a53b1681a..a93486fdd 100644 --- a/pygsti/report/fogidiagram.py +++ b/pygsti/report/fogidiagram.py @@ -379,10 +379,6 @@ def __init__(self, fogi_stores, op_coefficients, model_dim, op_to_target_qubits= def _normalize(self, v): return -_np.log10(max(v, 10**(-self.MAX_POWER)) * 10**self.MIN_POWER) / (self.MAX_POWER - self.MIN_POWER) - #def _normalize(v): - # v = min(max(v, 10**(-MAX_POWER)), 10**(-MIN_POWER)) - # return 1.0 - v / (10**(-MIN_POWER) - 10**(-MAX_POWER)) - def _node_HScolor(self, Hvalue, Svalue): r, g, b, a = _Hcmap(self._normalize(Hvalue)) r2, g2, b2, a2 = _Scmap(self._normalize(Svalue)) @@ -622,18 +618,6 @@ def _render_drawing(self, drawing, filename): if filename: d.saveSvg(filename) return d - #def _draw_node_simple(self, drawing, r, theta, coh, sto, op_label, total, val_max): - # nodes = drawing.nodes - # back_color, border_color, tcolor, _, labels, _ = self._get_node_colors(coh, sto, total) - # x, y = r * _np.cos(theta), r * _np.sin(theta) - # scale = (coh + sto) / val_max - # node_width = 20 + 40 * scale - # node_height = 20 + 40 * scale - # nodes.append(_draw.Rectangle(x - node_width / 2, y - node_height / 2, node_width, node_height, rx=3, - # fill=back_color, stroke=border_color, stroke_width=2)) - # nodes.append(_draw.Text(labels, self.node_fontsize * (0.5 + scale), x, y, fill=tcolor, - # text_anchor="middle", valign='middle', font_family='Times')) - def _draw_node(self, drawing, r, theta, coh, sto, op_label, total, val_max, groupid, info): nodes = drawing.nodes back_color, border_color, tcolor, _, labels, _ = self._get_node_colors(coh, sto, total) diff --git a/pygsti/report/reportables.py b/pygsti/report/reportables.py index 99495c8f2..e56e756b4 100644 --- a/pygsti/report/reportables.py +++ b/pygsti/report/reportables.py @@ -331,13 +331,6 @@ def evaluate_nearby(self, nearby_model): # ref for eigenvalue derivatives: https://www.win.tue.nl/casa/meetings/seminar/previous/_abstract051019_files/Presentation.pdf # noqa -#def circuit_eigenvalues(model, circuit): -# return _np.array(sorted(_np.linalg.eigvals(model.sim.product(circuit)), -# key=lambda ev: abs(ev), reverse=True)) -#CircuitEigenvalues = _modf.modelfn_factory(circuit_eigenvalues) -## init args == (model, circuit) - - def rel_circuit_eigenvalues(model_a, model_b, circuit): """ Eigenvalues of dot(productB(circuit)^-1, productA(circuit)) @@ -542,13 +535,6 @@ def evaluate_nearby(self, nearby_model): val = 0.5 * (_np.vdot(J.real, self.W.real) + _np.vdot(J.imag, self.W.imag)) return val - #def circuit_half_diamond_norm(model_a, model_b, circuit): - # A = model_a.sim.product(circuit) # "gate" - # B = model_b.sim.product(circuit) # "target gate" - # return half_diamond_norm(A, B, model_b.basis) - #CircuitHalfDiamondNorm = _modf.modelfn_factory(circuit_half_diamond_norm) - # # init args == (model_a, model_b, circuit) - else: circuit_half_diamond_norm = None CircuitHalfDiamondNorm = _null_fn diff --git a/pygsti/report/workspace.py b/pygsti/report/workspace.py index e0f90d1be..a1016ea02 100644 --- a/pygsti/report/workspace.py +++ b/pygsti/report/workspace.py @@ -1444,13 +1444,8 @@ def __getattr__(self, attr): #use __dict__ so no chance for recursive __getattr__ return getattr(self.__dict__['base'], attr) - def __len__(self): return len(self.base) - #Future - arithmetic ops should return a new SwitchValue - #def 
__add__(self,x): return self.base + x - #def __sub__(self,x): return self.base - x - #def __mul__(self,x): return self.base * x - #def __truediv__(self, x): return self.base / x - + def __len__(self): + return len(self.base) class WorkspaceOutput(object): """ @@ -2516,13 +2511,6 @@ def render(self, typ="html", id=None): plotID = "plot_" + id if typ == "html": - - #def getPlotlyDivID(html): - # #could make this more robust using lxml or something later... - # iStart = html.index('div id="') - # iEnd = html.index('"', iStart+8) - # return html[iStart+8:iEnd] - ##pick "master" plot, whose resizing dictates the resizing of other plots, ## as the largest-height plot. #iMaster = None; maxH = 0; diff --git a/pygsti/report/workspaceplots.py b/pygsti/report/workspaceplots.py index b285f2845..2022be44f 100644 --- a/pygsti/report/workspaceplots.py +++ b/pygsti/report/workspaceplots.py @@ -1703,19 +1703,6 @@ def _create(self, plottypes, circuits, dataset, model, prec, sum_up, box_labels, dataset = mdc_store.dataset model = mdc_store.model - #DEBUG: for checking - #def _addl_mx_fn_chk(plaq,x,y): - # gsplaq_ds = plaq.expand_aliases(dataset) - # spamlabels = model.get_spam_labels() - # cntMxs = _ph.total_count_matrix( gsplaq_ds, dataset)[None,:,:] - # probMxs = _ph.probability_matrices( plaq, model, spamlabels, - # probs_precomp_dict) - # freqMxs = _ph.frequency_matrices( gsplaq_ds, dataset, spamlabels) - # logLMxs = _tools.two_delta_logl_term( cntMxs, probMxs, freqMxs, 1e-4) - # return logLMxs.sum(axis=0) # sum over spam labels - - # End "Additional sub-matrix" functions - if not isinstance(plottypes, (list, tuple)): plottypes = [plottypes] diff --git a/pygsti/tools/basistools.py b/pygsti/tools/basistools.py index b87c59f67..2ec896cd5 100644 --- a/pygsti/tools/basistools.py +++ b/pygsti/tools/basistools.py @@ -201,37 +201,6 @@ def change_basis(mx, from_basis, to_basis): (_mt.safe_norm(ret, 'imag'), from_basis, to_basis, ret)) return ret.real -#def transform_matrix(from_basis, to_basis, dim_or_block_dims=None, sparse=False): -# ''' -# Compute the transformation matrix between two bases -# -# Parameters -# ---------- -# from_basis : Basis or str -# Basis being converted from -# -# to_basis : Basis or str -# Basis being converted to -# -# dim_or_block_dims : int or list of ints -# if strings provided as bases, the dimension of basis to use. -# -# sparse : bool, optional -# Whether to construct a sparse or dense transform matrix -# when this isn't specified already by `from_basis` or -# `to_basis` (e.g. when these are both strings). -# -# Returns -# ------- -# Basis -# the composite basis created -# ''' -# if dim_or_block_dims is None: -# assert isinstance(from_basis, Basis) -# else: -# from_basis = Basis(from_basis, dim_or_block_dims, sparse=sparse) -# return from_basis.transform_matrix(to_basis) - def create_basis_pair(mx, from_basis, to_basis): """ diff --git a/pygsti/tools/fastcalc.pyx b/pygsti/tools/fastcalc.pyx index bed8e6c23..9779c83a8 100644 --- a/pygsti/tools/fastcalc.pyx +++ b/pygsti/tools/fastcalc.pyx @@ -573,64 +573,6 @@ def fast_kron(np.ndarray[double, ndim=1, mode="c"] outvec not None, #assert(sz == N) - -#An attempt at a faster matrix prod specific to 2D matrices -- much SLOWER than numpy!! 
-#@cython.cdivision(True) # turn off divide-by-zero checking -#@cython.boundscheck(False) # turn off bounds-checking for entire function -#@cython.wraparound(False) # turn off negative index wrapping for entire function -#def fast_dot2(np.ndarray[double, ndim=2] out, -# np.ndarray[double, ndim=2] a, np.ndarray[double, ndim=2] b): -# cdef double* out_ptr = out.data -# cdef double* a_ptr = a.data -# cdef double* b_ptr = b.data -# cdef double* arow -# cdef double* bcol -# cdef double* outrow -# cdef double tot -# cdef INT m = a.shape[0] -# cdef INT n = b.shape[1] -# cdef INT l = a.shape[1] -# cdef INT astride = a.strides[0] // a.itemsize -# cdef INT bstride = b.strides[0] // b.itemsize -# cdef INT outstride = out.strides[0] // out.itemsize -# cdef INT ainc = a.strides[1] // a.itemsize -# cdef INT binc = b.strides[1] // b.itemsize -# cdef INT outinc = out.strides[1] // out.itemsize -# cdef INT i_times_astride -# cdef INT i_times_outstride -# cdef INT j_times_binc -# cdef INT j_times_outinc -# cdef INT k_times_bstride -# cdef INT k_times_ainc -# cdef INT i -# cdef INT j -# cdef INT k -# -# # out_ij = sum_k a_ik * b_kl -# -# i_times_astride = 0 -# i_times_outstride = 0 -# for i in range(m): -# arow = &a_ptr[i_times_astride] -# outrow = &out_ptr[i_times_outstride] -# j_times_binc = 0 -# j_times_outinc = 0 -# for j in range(n): -# bcol = &b_ptr[j_times_binc] -# k_times_bstride = 0 -# k_times_ainc = 0 -# tot = 0.0 -# for k in range(l): -# tot = tot + arow[k_times_ainc] * bcol[k_times_bstride] -# k_times_bstride = k_times_bstride + bstride -# k_times_ainc = k_times_ainc + ainc -# outrow[j_times_outinc] = tot -# j_times_binc = j_times_binc + binc -# j_times_outinc = j_times_outinc + outinc -# i_times_astride = i_times_astride + astride -# i_times_outstride = i_times_outstride + outstride - - @cython.boundscheck(False) # turn off bounds-checking for entire function @cython.wraparound(False) # turn off negative index wrapping for entire function def fast_kron_complex(np.ndarray[np.complex128_t, ndim=1, mode="c"] outvec not None, diff --git a/pygsti/tools/fogitools.py b/pygsti/tools/fogitools.py index bbc8bed1c..ec8fcc374 100644 --- a/pygsti/tools/fogitools.py +++ b/pygsti/tools/fogitools.py @@ -732,41 +732,6 @@ def resolve_norm_order(vecs_to_normalize, label_lists, given_norm_order): return (fogi_dirs, fogi_meta, dep_fogi_dirs, dep_fogi_meta) -#def create_fogi_dir_labels(fogi_opsets, fogi_dirs, fogi_rs, fogi_gaugespace_dirs, errorgen_coefficients): -# -# fogi_names = [] -# fogi_abbrev_names = [] -# -# # Note: fogi_dirs is a 2D array, so .T to iterate over cols, whereas fogi_gaugespace_dirs -# # is a list of vectors, so just iterating is fine. 
-# for opset, fogi_dir, fogi_epsilon in zip(fogi_opsets, fogi_dirs.T, fogi_gaugespace_dirs): -# -# if len(opset) == 1: # Intrinsic quantity -# assert(fogi_epsilon is None) -# op_elemgen_labels = errorgen_coefficient_labels[op_label] -# errgen_name = elem_vec_name(fogi_dir, op_elemgen_labels) -# errgen_names_abbrev = elem_vec_names(local_fogi_dirs, op_elemgen_labels, include_type=False) -# fogi_names.extend(["%s_%s" % ((("(%s)" % egname) if (' ' in egname) else egname), -# op_label_abbrevs.get(op_label, str(op_label))) -# for egname in errgen_names]) -# fogi_abbrev_names.extend(errgen_names_abbrev) -# -# intersection_space_to_add = _np.take(intersection_space, rel_cols_to_add, axis=1) -# #intersection_space_to_add = _np.dot(gauge_linear_combos, indep_intersection_space) \ -# # if (gauge_linear_combos is not None) else intersection_space_to_add -# -# -# -# -# intersection_names = elem_vec_names(intersection_space_to_add, gauge_elemgen_labels) -# intersection_names_abbrev = elem_vec_names(intersection_space_to_add, gauge_elemgen_labels, -# include_type=False) -# fogi_names.extend(["ga(%s)_%s - ga(%s)_%s" % ( -# iname, "|".join([op_label_abbrevs.get(l, str(l)) for l in existing_set]), -# iname, op_label_abbrevs.get(op_label, str(op_label))) for iname in intersection_names]) -# fogi_abbrev_names.extend(["ga(%s)" % iname for iname in intersection_names_abbrev]) - - def compute_maximum_relational_errors(primitive_op_labels, errorgen_coefficients, gauge_action_matrices, errorgen_coefficient_bases_by_op, gauge_basis, model_dim): """ TODO: docstring """ @@ -865,190 +830,6 @@ def _create_errgen_op(vec, list_of_mxs): return ret -#An alternative but inferior algorithm for constructing FOGI quantities: Keep around for checking/reference or REMOVE? -#def _compute_fogi_via_nullspaces(self, primitive_op_labels, ham_basis, other_basis, other_mode="all", -# ham_gauge_linear_combos=None, other_gauge_linear_combos=None, -# op_label_abbrevs=None, reduce_to_model_space=True): -# num_ham_elem_errgens = (len(ham_basis) - 1) -# num_other_elem_errgens = (len(other_basis) - 1)**2 if other_mode == "all" else (len(other_basis) - 1) -# ham_elem_labels = [('H', bel) for bel in ham_basis.labels[1:]] -# other_elem_labels = [('S', bel) for bel in other_basis.labels[1:]] if other_mode != "all" else \ -# [('S', bel1, bel2) for bel1 in other_basis.labels[1:] for bel2 in other_basis.labels[1:]] -# assert(len(ham_elem_labels) == num_ham_elem_errgens) -# assert(len(other_elem_labels) == num_other_elem_errgens) -# -# #Get lists of the present (existing within the model) labels for each operation -# ham_labels_for_op = {op_label: ham_elem_labels[:] for op_label in primitive_op_labels} # COPY lists! 
-# other_labels_for_op = {op_label: other_elem_labels[:] for op_label in primitive_op_labels} # ditto -# if reduce_to_model_space: -# for op_label in primitive_op_labels: -# op = self.operations[op_label] -# lbls = op.errorgen_coefficient_labels() -# present_ham_elem_lbls = set(filter(lambda lbl: lbl[0] == 'H', lbls)) -# present_other_elem_lbls = set(filter(lambda lbl: lbl[0] == 'S', lbls)) -# -# disallowed_ham_space_labels = set(ham_elem_labels) - present_ham_elem_lbls -# disallowed_row_indices = [ham_elem_labels.index(disallowed_lbl) -# for disallowed_lbl in disallowed_ham_space_labels] -# for i in sorted(disallowed_row_indices, reverse=True): -# del ham_labels_for_op[op_label][i] -# -# disallowed_other_space_labels = set(other_elem_labels) - present_other_elem_lbls -# disallowed_row_indices = [other_elem_labels.index(disallowed_lbl) -# for disallowed_lbl in disallowed_other_space_labels] -# for i in sorted(disallowed_row_indices, reverse=True): -# del other_labels_for_op[op_label][i] -# -# #Step 1: construct nullspaces associated with sets of operations -# ham_nullspaces = {} -# other_nullspaces = {} -# max_size = len(primitive_op_labels) -# for set_size in range(1, max_size + 1): -# ham_nullspaces[set_size] = {} # dict mapping operation-sets of `set_size` to nullspaces -# other_nullspaces[set_size] = {} -# -# for op_set in _itertools.combinations(primitive_op_labels, set_size): -# #print(op_set) -# ham_gauge_action_mxs = [] -# other_gauge_action_mxs = [] -# ham_rows_by_op = {}; h_off = 0 -# other_rows_by_op = {}; o_off = 0 -# for op_label in op_set: # Note: "ga" stands for "gauge action" in variable names below -# op = self.operations[op_label] -# if isinstance(op, _op.LindbladOp): -# op_mx = op.unitary_postfactor.to_dense() -# else: -# assert(False), "STOP - you probably don't want to do this!" -# op_mx = op.to_dense() -# U = _bt.change_basis(op_mx, self.basis, 'std') -# ham_ga = _gt.first_order_ham_gauge_action_matrix(U, ham_basis) -# other_ga = _gt.first_order_other_gauge_action_matrix(U, other_basis, other_mode) -# -# if ham_gauge_linear_combos is not None: -# ham_ga = _np.dot(ham_ga, ham_gauge_linear_combos) -# if other_gauge_linear_combos is not None: -# other_ga = _np.dot(other_ga, other_gauge_linear_combos) -# -# ham_gauge_action_mxs.append(ham_ga) -# other_gauge_action_mxs.append(other_ga) -# reduced_ham_nrows = len(ham_labels_for_op[op_label]) # ham_ga.shape[0] when unrestricted -# reduced_other_nrows = len(other_labels_for_op[op_label]) # other_ga.shape[0] when unrestricted -# ham_rows_by_op[op_label] = slice(h_off, h_off + reduced_ham_nrows); h_off += reduced_ham_nrows -# other_rows_by_op[op_label] = slice(o_off, o_off + reduced_other_nrows); o_off += reduced_other_nrows -# assert(ham_ga.shape[0] == num_ham_elem_errgens) -# assert(other_ga.shape[0] == num_other_elem_errgens) -# -# #Stack matrices to form "base" gauge action matrix for op_set -# ham_ga_mx = _np.concatenate(ham_gauge_action_mxs, axis=0) -# other_ga_mx = _np.concatenate(other_gauge_action_mxs, axis=0) -# -# # Intersect gauge action with the space of elementary errorgens present in the model. -# # We may need to eliminate some rows of X_ga matrices, and (only) keep linear combos -# # of the columns that are zero on these rows. 
-# present_ham_elem_lbls = set() -# present_other_elem_lbls = set() -# for op_label in op_set: -# op = self.operations[op_label] -# lbls = op.errorgen_coefficient_labels() # length num_coeffs -# present_ham_elem_lbls.update([(op_label, lbl) for lbl in lbls if lbl[0] == 'H']) -# present_other_elem_lbls.update([(op_label, lbl) for lbl in lbls if lbl[0] == 'S']) -# -# full_ham_elem_labels = [(op_label, elem_lbl) for op_label in op_set -# for elem_lbl in ham_elem_labels] -# assert(present_ham_elem_lbls.issubset(full_ham_elem_labels)), \ -# "The given space of hamiltonian elementary gauge-gens must encompass all those in model ops!" -# disallowed_ham_space_labels = set(full_ham_elem_labels) - present_ham_elem_lbls -# disallowed_row_indices = [full_ham_elem_labels.index(disallowed_lbl) -# for disallowed_lbl in disallowed_ham_space_labels] -# -# if reduce_to_model_space and len(disallowed_row_indices) > 0: -# #disallowed_rows = _np.take(ham_ga_mx, disallowed_row_indices, axis=0) -# #allowed_linear_combos = _mt.nice_nullspace(disallowed_rows, tol=1e-4) -# #ham_ga_mx = _np.dot(ham_ga_mx, allowed_linear_combos) -# ham_ga_mx = _np.delete(ham_ga_mx, disallowed_row_indices, axis=0) -# -# full_other_elem_labels = [(op_label, elem_lbl) for op_label in op_set -# for elem_lbl in other_elem_labels] -# assert(present_other_elem_lbls.issubset(full_other_elem_labels)), \ -# "The given space of 'other' elementary gauge-gens must encompass all those in model ops!" -# disallowed_other_space_labels = set(full_other_elem_labels) - present_other_elem_lbls -# disallowed_row_indices = [full_other_elem_labels.index(disallowed_lbl) -# for disallowed_lbl in disallowed_other_space_labels] -# -# if reduce_to_model_space and len(disallowed_row_indices) > 0: -# #disallowed_rows = _np.take(other_ga_mx, disallowed_row_indices, axis=0) -# #allowed_linear_combos = _mt.nice_nullspace(disallowed_rows, tol=1e-4) -# #other_ga_mx = _np.dot(other_ga_mx, allowed_linear_combos) -# other_ga_mx = _np.delete(other_ga_mx, disallowed_row_indices, axis=0) -# -# #Add all known (already tabulated) nullspace directions so that we avoid getting them again -# # when we compute the nullspace of the gauge action matrix below. -# for previous_size in range(1, set_size + 1): # include current size! 
-# for previous_op_set, (nullsp, previous_rows) in ham_nullspaces[previous_size].items(): -# padded_nullsp = _np.zeros((ham_ga_mx.shape[0], nullsp.shape[1]), 'd') -# for op in previous_op_set: -# if op not in ham_rows_by_op: continue -# padded_nullsp[ham_rows_by_op[op], :] = nullsp[previous_rows[op], :] -# ham_ga_mx = _np.concatenate((ham_ga_mx, padded_nullsp), axis=1) -# -# for previous_op_set, (nullsp, previous_rows) in other_nullspaces[previous_size].items(): -# padded_nullsp = _np.zeros((other_ga_mx.shape[0], nullsp.shape[1]), other_ga_mx.dtype) -# for op in previous_op_set: -# if op not in other_rows_by_op: continue -# padded_nullsp[other_rows_by_op[op], :] = nullsp[previous_rows[op], :] -# other_ga_mx = _np.concatenate((other_ga_mx, padded_nullsp), axis=1) -# -# #Finally, compute the nullspace of the resulting gauge-action + already-tallied matrix: -# nullspace = _mt.nice_nullspace(ham_ga_mx.T) -# ham_nullspaces[set_size][op_set] = (nullspace, ham_rows_by_op) -# #DEBUG: print(" NULLSP DIM = ",nullspace.shape[1]) -# #DEBUG: labels = [(op_label, elem_lbl) for op_label in op_set -# #DEBUG: for elem_lbl in ham_labels_for_op[op_label]] -# #DEBUG: print("\n".join(fogi_names(nullspace, labels, op_label_abbrevs))) -# -# nullspace = _mt.nice_nullspace(other_ga_mx.T) -# other_nullspaces[set_size][op_set] = (nullspace, other_rows_by_op) -# -# # Step 2: convert these per-operation-set nullspaces into vectors over a single "full" -# # space of all the elementary error generators (as given by ham_basis, other_basis, & other_mode) -# -# # Note: "full" designation is for space of all elementary error generators as given by their -# # supplied ham_basis, other_basis, and other_mode. -# -# # Construct full-space vectors for each nullspace vector found by crawling through -# # the X_nullspaces dictionary and embedding values as needed. -# ham_rows_by_op = {}; off = 0 -# for op_label in primitive_op_labels: -# ham_rows_by_op[op_label] = slice(off, off + len(ham_labels_for_op[op_label])) -# off += len(ham_labels_for_op[op_label]) -# full_ham_fogi_vecs = _np.empty((off, 0), 'd') -# for size in range(1, max_size + 1): -# for op_set, (nullsp, op_set_rows) in ham_nullspaces[size].items(): -# padded_nullsp = _np.zeros((full_ham_fogi_vecs.shape[0], nullsp.shape[1]), 'd') -# for op in op_set: -# padded_nullsp[ham_rows_by_op[op], :] = nullsp[op_set_rows[op], :] -# full_ham_fogi_vecs = _np.concatenate((full_ham_fogi_vecs, padded_nullsp), axis=1) -# -# other_rows_by_op = {}; off = 0 -# for op_label in primitive_op_labels: -# other_rows_by_op[op_label] = slice(off, off + len(other_labels_for_op[op_label])) -# off += len(other_labels_for_op[op_label]) -# full_other_fogi_vecs = _np.empty((off, 0), complex) -# for size in range(1, max_size + 1): -# for op_set, (nullsp, op_set_rows) in other_nullspaces[size].items(): -# padded_nullsp = _np.zeros((full_other_fogi_vecs.shape[0], nullsp.shape[1]), complex) -# for op in op_set: -# padded_nullsp[other_rows_by_op[op], :] = nullsp[op_set_rows[op], :] -# full_other_fogi_vecs = _np.concatenate((full_other_fogi_vecs, padded_nullsp), axis=1) -# -# assert(_np.linalg.matrix_rank(full_ham_fogi_vecs) == full_ham_fogi_vecs.shape[1]) -# assert(_np.linalg.matrix_rank(full_other_fogi_vecs) == full_other_fogi_vecs.shape[1]) -# -# # Returns the vectors of FOGI (first order gauge invariant) linear combos as well -# # as lists of labels for the columns & rows, respectively. 
-# return (full_ham_fogi_vecs, ham_labels_for_op), (full_other_fogi_vecs, other_labels_for_op) - - def op_elem_vec_name(vec, elem_op_labels, op_label_abbrevs): name = "" for i, (op_lbl, elem_lbl) in enumerate(elem_op_labels): diff --git a/pygsti/tools/listtools.py b/pygsti/tools/listtools.py index 67de5be94..0beb2749c 100644 --- a/pygsti/tools/listtools.py +++ b/pygsti/tools/listtools.py @@ -383,117 +383,3 @@ def lists_to_tuples(obj): return {lists_to_tuples(k): lists_to_tuples(v) for k, v in obj.items()} else: return obj - - -# ------------------------------------------------------------------------------ -# Machinery initially designed for an in-place take operation, which computes -# how to do in-place permutations of arrays/lists efficiently. Kept here -# commented out in case this is needed some time in the future. -# ------------------------------------------------------------------------------ -# -#def build_permute_copy_order(indices): -# #Construct a list of the operations needed to "take" indices -# # out of an array. -# -# nIndices = len(indices) -# flgs = _np.zeros(nIndices,'bool') #flags indicating an index has been processed -# shelved = {} -# copyList = [] -# -# while True: #loop until we've processed everything -# -# #The cycle has ended. Now find an unprocessed -# # destination to begin a new cycle -# for i in range(nIndices): -# if flgs[i] == False: -# if indices[i] == i: # index i is already where it need to be! -# flgs[i] = True -# else: -# cycleFirstIndex = iDest = i -# if cycleFirstIndex in indices: -# copyList.append( (-1,i) ) # iDest == -1 means copy to offline storage -# break; -# else: -# break #everything has been processed -- we're done! -# -# while True: # loop over cycle -# -# # at this point, data for index iDest has been stored or copied -# iSrc = indices[iDest] # get source index for current destination -# -# # record appropriate copy command -# if iSrc == cycleFirstIndex: -# copyList.append( (iDest, -1) ) # copy from offline storage -# flgs[iDest] = True -# -# #end of this cycle since we've hit our starting point, -# # but no need to shelve first cycle element in this case. -# break #(end of cycle) -# else: -# if iSrc in shelved: #original iSrc is now at index shelved[iSrc] -# iSrc = shelved[iSrc] -# -# copyList.append( (iDest,iSrc) ) # => copy src -> dest -# flgs[iDest] = True -# -# if iSrc < nIndices: -# #Continue cycle (swapping within "active" (index < nIndices) region) -# iDest = iSrc # make src the new dest -# else: -# #end of this cycle, and first cycle index hasn't been -# # used, so shelve it (store it for later use) if it -# # will be needed in the future. 
-# if cycleFirstIndex in indices: -# copyList.append( (iSrc,-1) ) -# shelved[cycleFirstIndex] = iSrc -# -# break #(end of cycle) -# -# return copyList -# -## X X X -## 0 1 2 3 (nIndices == 4) -## 3, 0, 7, 4 -## store 0 -## 3 -> 0 -## 4 -> 3 -## stored[0] -> 4, shelved[0] = 4 -## store 1 -## shelved[0]==4 -> 1, NO((stored[1] -> 4, shelved[1] = 4)) B/C don't need index 1 -## store 2 -## 7 -> 2 -## NO((Stored[2] -> 7, istore[2] = 7)) -# -# -#def inplace_take(a, indices, axis=None, copyList=None): -# check = a.take(indices, axis=axis) #DEBUGGING -# return check #FIX FOR NOW = COPY -# -# if axis is None: -# def mkindex(i): -# return i -# else: -# def mkindex(i): -# sl = [slice(None)] * a.ndim -# sl[axis] = i -# return sl -# -# if copyList is None: -# copyList = build_permute_copy_order(indices) -# -# store = None -# for iDest,iSrc in copyList: -# if iDest == -1: store = a[mkindex(iSrc)].copy() #otherwise just get a view! -# elif iSrc == -1: a[mkindex(iDest)] = store -# else: a[mkindex(iDest)] = a[mkindex(iSrc)] -# -# ret = a[mkindex(slice(0,len(indices)))] -# if _np.linalg.norm(ret-check) > 1e-8 : -# print("ERROR CHECK FAILED") -# print("ret = ",ret) -# print("check = ",check) -# print("diff = ",_np.linalg.norm(ret-check)) -# assert(False) -# #check = None #free mem? -# #return ret -# return check diff --git a/pygsti/tools/matrixtools.py b/pygsti/tools/matrixtools.py index 77a826c97..94940c45c 100644 --- a/pygsti/tools/matrixtools.py +++ b/pygsti/tools/matrixtools.py @@ -1833,23 +1833,6 @@ def _custom_expm_multiply_simple_core(a, b, mu, m_star, s, tol, eta): # t == 1. return F -#From SciPy source, as a reference - above we assume A is a sparse csr matrix -# and B is a dense vector -#def _exact_inf_norm(A): -# # A compatibility function which should eventually disappear. -# if scipy.sparse.isspmatrix(A): -# return max(abs(A).sum(axis=1).flat) -# else: -# return np.linalg.norm(A, np.inf) -# -# -#def _exact_1_norm(A): -# # A compatibility function which should eventually disappear. -# if scipy.sparse.isspmatrix(A): -# return max(abs(A).sum(axis=0).flat) -# else: -# return np.linalg.norm(A, 1) - def expop_multiply_prep(op, a_1_norm=None, tol=EXPM_DEFAULT_TOL): """ Returns "prepared" meta-info about operation op, which is assumed to be traceless (so no shift is needed). @@ -2216,15 +2199,6 @@ def union_space(space1, space2, tol=1e-7): return VW[:, indep_cols] -#UNUSED -#def spectral_radius(x): -# if hasattr(x, 'ndim') and x.ndim == 2: # then interpret as a numpy array and take norm -# evals = _np.sort(_np.linalg.eigvals(x)) -# return abs(evals[-1] - evals[0]) -# else: -# return x - - def jamiolkowski_angle(hamiltonian_mx): """ TODO: docstring diff --git a/pygsti/tools/rbtheory.py b/pygsti/tools/rbtheory.py index 79e23f06c..6ea77d8b4 100644 --- a/pygsti/tools/rbtheory.py +++ b/pygsti/tools/rbtheory.py @@ -798,72 +798,3 @@ def gate_dependence_of_errormaps(model, target_model, norm='diamond', mx_basis=N delta_avg = _np.mean(delta) return delta_avg - -# Future : perhaps put these back in. 
-#def Magesan_theory_predicted_decay(model, target_model, mlist, success_outcomelabel=('0',), -# norm='1to1', order='zeroth', return_all = False): -# -# assert(order == 'zeroth' or order == 'first') -# -# d = int(round(_np.sqrt(model.dim))) -# MTPs = {} -# MTPs['r'] = gateset_infidelity(model,target_model,itype='AGI') -# MTPs['p'] = _analysis.r_to_p(MTPs['r'],d,rtype='AGI') -# MTPs['delta'] = gate_dependence_of_errormaps(model, target_model, norm) -# error_gs = errormaps(model, target_model) -# -# R_list = [] -# Q_list = [] -# for gate in list(target_model.operations.keys()): -# R_list.append(_np.dot(_np.dot(error_gs.operations[gate],target_model.operations[gate]), -# _np.dot(error_gs.operations['Gavg'],_np.transpose(target_model.operations[gate])))) -# Q_list.append(_np.dot(target_model.operations[gate], -# _np.dot(error_gs.operations[gate],_np.transpose(target_model.operations[gate])))) -# -# error_gs.operations['GR'] = _np.mean(_np.array([ i for i in R_list]),axis=0) -# error_gs.operations['GQ'] = _np.mean(_np.array([ i for i in Q_list]),axis=0) -# error_gs.operations['GQ2'] = _np.dot(error_gs.operations['GQ'],error_gs.operations['Gavg']) -# error_gs.preps['rhoc_mixed'] = 1./d*_cnst.create_identity_vec(error_gs.basis)# -# -# #Assumes standard POVM labels -# povm = _objs.UnconstrainedPOVM( [('0_cm', target_model.povms['Mdefault']['0']), -# ('1_cm', target_model.povms['Mdefault']['1'])] ) -# ave_error_gsl = _cnst.to_circuits([('rho0','Gavg'),('rho0','GR'),('rho0','Gavg','GQ')]) -# data = _cnst.simulate_data(error_gs, ave_error_gsl, num_samples=1, sample_error="none")# - -# pr_L_p = data[('rho0','Gavg')][success_outcomelabel] -# pr_L_I = data[('rho0','Gavg')][success_outcomelabel_cm] -# pr_R_p = data[('rho0','GR')][success_outcomelabel] -# pr_R_I = data[('rho0','GR')][success_outcomelabel_cm] -# pr_Q_p = data[('rho0','Gavg','GQ')][success_outcomelabel] -# p = MTPs['p'] -# B_1 = pr_R_I -# A_1 = (pr_Q_p/p) - pr_L_p + ((p -1)*pr_L_I/p) + ((pr_R_p - pr_R_I)/p) -# C_1 = pr_L_p - pr_L_I -# q = _tls.average_gate_infidelity(error_gs.operations['GQ2'],_np.identity(d**2,float)) -# q = _analysis.r_to_p(q,d,rtype='AGI') -# -# if order == 'zeroth': -# MTPs['A'] = pr_L_I -# MTPs['B'] = pr_L_p - pr_L_I -# if order == 'first': -# MTPs['A'] = B_1 -# MTPs['B'] = A_1 - C_1*(q - 1)/p**2 -# MTPs['C'] = C_1*(q- p**2)/p**2 -# -# if order == 'zeroth': -# Pm = MTPs['A'] + MTPs['B']*MTPs['p']**_np.array(mlist) -# if order == 'first': -# Pm = MTPs['A'] + (MTPs['B'] + _np.array(mlist)*MTPs['C'])*MTPs['p']**_np.array(mlist) -# -# sys_eb = (MTPs['delta'] + 1)**(_np.array(mlist)+1) - 1 -# if order == 'first': -# sys_eb = sys_eb - (_np.array(mlist)+1)*MTPs['delta'] -# -# upper = Pm + sys_eb -# upper[upper > 1]=1. -# -# lower = Pm - sys_eb -# lower[lower < 0]=0. 
-# -# return mlist, Pm, upper, lower, MTPs diff --git a/test/test_packages/iotest/test_codecs.py b/test/test_packages/iotest/test_codecs.py index b8d718a9f..f14d26f1e 100644 --- a/test/test_packages/iotest/test_codecs.py +++ b/test/test_packages/iotest/test_codecs.py @@ -327,27 +327,6 @@ def test_pickle_dataset_with_circuitlabels(self): self.assertEqual(c2.str, "([Gx:0Gy:1])^2") pygsti.circuits.Circuit.default_expand_subcircuits = True - #Debugging, because there was some weird python3 vs 2 json incompatibility with string labels - # - turned out to be that the unit test files needed to import unicode_literals from __future__ - #def test_labels(self): - # strLabel = pygsti.baseobjs.Label("Gi") - # #strLabel = ("Gi",) - # from pygsti.modelpacks.legacy import std1Q_XYI as std - # - # s = json.dumps(strLabel) - # print("s = ",str(s)) - # x = msgpack.loads(s) - # print("x = ",x) - # - # print("-----------------------------") - # - # s = json.dumps(std.prepStrs[2]) - # print("s = ",s) - # x = json.loads(s) - # print("x = ",x) - # assert(False),"STOP" - - if __name__ == "__main__": unittest.main(verbosity=2) diff --git a/test/unit/modelmembers/test_operation.py b/test/unit/modelmembers/test_operation.py index 720c3ee91..d3b44deef 100644 --- a/test/unit/modelmembers/test_operation.py +++ b/test/unit/modelmembers/test_operation.py @@ -745,12 +745,6 @@ def build_gate(): mx = np.identity(state_space.dim, 'd') return op.EmbeddedOp(state_space, ['Q0'], op.FullArbitraryOp(mx, evotype=evotype, state_space=None)) - #This is really a state-space unit test - #def test_constructor_raises_on_bad_state_space_label(self): - # mx = np.identity(4, 'd') - # with self.assertRaises(ValueError): - # op.EmbeddedOp([('L0', 'foobar')], ['Q0'], op.FullArbitraryOp(mx)) - def test_constructor_raises_on_state_space_label_mismatch(self): mx = np.identity(4, 'd') state_space = statespace.StateSpace.cast([('Q0',), ('Q1',)]) diff --git a/test/unit/objects/test_evaltree.py b/test/unit/objects/test_evaltree.py deleted file mode 100644 index f0e73b376..000000000 --- a/test/unit/objects/test_evaltree.py +++ /dev/null @@ -1,58 +0,0 @@ -import numpy as np - -from ..util import BaseCase - - -#TODO: create an evaltree and use this function to check it -- this was -# taken from an internal checking function within evaltree.py -#def check_tree(evaltree, original_list): #generate_circuit_list(self, permute=True): -# """ -# Generate a list of the final operation sequences this tree evaluates. -# -# This method essentially "runs" the tree and follows its -# prescription for sequentailly building up longer strings -# from shorter ones. When permute == True, the resulting list -# should be the same as the one passed to initialize(...), and -# so this method may be used as a consistency check. -# -# Parameters -# ---------- -# permute : bool, optional -# Whether to permute the returned list of strings into the -# same order as the original list passed to initialize(...). -# When False, the computed order of the operation sequences is -# given, which is matches the order of the results from calls -# to `Model` bulk operations. Non-trivial permutation -# occurs only when the tree is split (in order to keep -# each sub-tree result a contiguous slice within the parent -# result). -# -# Returns -# ------- -# list of gate-label-tuples -# A list of the operation sequences evaluated by this tree, each -# specified as a tuple of operation labels. 
-# """ -# circuits = [None] * len(self) -# -# #Set "initial" (single- or zero- gate) strings -# for i, opLabel in zip(self.get_init_indices(), self.get_init_labels()): -# if opLabel == "": circuits[i] = () # special case of empty label -# else: circuits[i] = (opLabel,) -# -# #Build rest of strings -# for i in self.get_evaluation_order(): -# iLeft, iRight = self[i] -# circuits[i] = circuits[iLeft] + circuits[iRight] -# -# #Permute to get final list: -# nFinal = self.num_final_strings() -# if self.original_index_lookup is not None and permute: -# finalCircuits = [None] * nFinal -# for iorig, icur in self.original_index_lookup.items(): -# if iorig < nFinal: finalCircuits[iorig] = circuits[icur] -# assert(None not in finalCircuits) -# return finalCircuits -# else: -# assert(None not in circuits[0:nFinal]) -# return circuits[0:nFinal] diff --git a/test/unit/objects/test_model.py b/test/unit/objects/test_model.py index fdad0a22c..b9f50c8e0 100644 --- a/test/unit/objects/test_model.py +++ b/test/unit/objects/test_model.py @@ -377,16 +377,6 @@ def test_hproduct(self): hp_flat = self.model.sim.hproduct(circuit, flat=True) # TODO assert correctness for all of the above - #REMOVED from fwdsim (unused) - #def test_bulk_hproduct(self): - # gatestring1 = ('Gx', 'Gy') - # gatestring2 = ('Gx', 'Gy', 'Gy') - # circuits = [gatestring1, gatestring2] - # hp = self.model.sim.bulk_hproduct(circuits) - # hp_flat = self.model.sim.bulk_hproduct(circuits, flat=True) - # hp_scaled, scaleVals = self.model.sim.bulk_hproduct(circuits, scale=True) - # # TODO assert correctness for all of the above - class SimMethodBase(object): """Tests for model methods which can use different forward sims""" diff --git a/test/unit/objects/test_prefixtable.py b/test/unit/objects/test_prefixtable.py deleted file mode 100644 index 38808a51b..000000000 --- a/test/unit/objects/test_prefixtable.py +++ /dev/null @@ -1,62 +0,0 @@ -import numpy as np - -from ..util import BaseCase - - -#TODO: create a prefixtable and use this function to check it -- this was -# taken from an internal checking function within prefixtable.py - -#def _check_prefix_table(prefix_table): #generate_circuit_list(self, permute=True): -# """ -# Generate a list of the final operation sequences this tree evaluates. -# -# This method essentially "runs" the tree and follows its -# prescription for sequentailly building up longer strings -# from shorter ones. When permute == True, the resulting list -# should be the same as the one passed to initialize(...), and -# so this method may be used as a consistency check. -# -# Parameters -# ---------- -# permute : bool, optional -# Whether to permute the returned list of strings into the -# same order as the original list passed to initialize(...). -# When False, the computed order of the operation sequences is -# given, which is matches the order of the results from calls -# to `Model` bulk operations. Non-trivial permutation -# occurs only when the tree is split (in order to keep -# each sub-tree result a contiguous slice within the parent -# result). -# -# Returns -# ------- -# list of gate-label-tuples -# A list of the operation sequences evaluated by this tree, each -# specified as a tuple of operation labels. 
-# """ -# circuits = [None] * len(self) -# -# cachedStrings = [None] * self.cache_size() -# -# #Build rest of strings -# for i in self.get_evaluation_order(): -# iStart, remainingStr, iCache = self[i] -# if iStart is None: -# circuits[i] = remainingStr -# else: -# circuits[i] = cachedStrings[iStart] + remainingStr -# -# if iCache is not None: -# cachedStrings[iCache] = circuits[i] -# -# #Permute to get final list: -# nFinal = self.num_final_strings() -# if self.original_index_lookup is not None and permute: -# finalCircuits = [None] * nFinal -# for iorig, icur in self.original_index_lookup.items(): -# if iorig < nFinal: finalCircuits[iorig] = circuits[icur] -# assert(None not in finalCircuits) -# return finalCircuits -# else: -# assert(None not in circuits[0:nFinal]) -# return circuits[0:nFinal] diff --git a/test/unit/tools/test_likelihoodfns.py b/test/unit/tools/test_likelihoodfns.py index 6b0eafc41..31327dc01 100644 --- a/test/unit/tools/test_likelihoodfns.py +++ b/test/unit/tools/test_likelihoodfns.py @@ -68,11 +68,6 @@ def test_logl_max(self): maxL2 = lfn.logl_max(self.model, self.ds, self.circuits, poisson_picture=False) # TODO assert correctness - #Removed this function - #def test_cptp_penalty(self): - # lfn.cptp_penalty(self.model, include_spam_penalty=True) - # # TODO assert correctness - def test_two_delta_logl(self): twoDelta1 = lfn.two_delta_logl_term(n=100, p=0.5, f=0.6, min_prob_clip=1e-6, poisson_picture=True) twoDelta2 = lfn.two_delta_logl_term(n=100, p=0.5, f=0.6, min_prob_clip=1e-6, poisson_picture=False) From ccc6464b9bb80c4029b8ed6e5faf08f077593dc1 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Fri, 30 Aug 2024 21:11:09 -0600 Subject: [PATCH 02/41] Add guardrails around public model attributes These changes address unexpected behavior that can occur when manually adding an operation without then manually rebuilding the parameter vector. When this happens it is possible for the Model's internal attributes to fall out of sync with those of it's child objects. Now we check for the need to rebuild the parameter vector every time. --- pygsti/models/model.py | 50 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/pygsti/models/model.py b/pygsti/models/model.py index 77a7b33ab..1442537f8 100644 --- a/pygsti/models/model.py +++ b/pygsti/models/model.py @@ -604,6 +604,56 @@ def num_params(self): """ self._clean_paramvec() return len(self._paramvec) + + @property + def parameter_labels(self): + """ + A list of labels, usually of the form `(op_label, string_description)` describing this model's parameters. + """ + self._clean_paramvec() + return self._paramlbls + + def set_parameter_label(self, index, label): + """ + Set the label of a single model parameter. + + Parameters + ---------- + index : int + The index of the paramter whose label should be set. + + label : object + An object that serves to label this parameter. Often a string. + + Returns + ------- + None + """ + self._clean_paramvec() + self._paramlbls[index] = label + + @property + def parameter_bounds(self): + """ Upper and lower bounds on the values of each parameter, utilized by optimization routines """ + self._clean_paramvec() + return self._param_bounds + + @property + def num_modeltest_params(self): + """ + The parameter count to use when testing this model against data. + + Often times, this is the same as :meth:`num_params`, but there are times + when it can convenient or necessary to use a parameter count different than + the actual number of parameters in this model. 
+ + Returns + ------- + int + the number of model parameters. + """ + self._clean_paramvec() + return Model.num_modeltest_params.fget(self) @property def parameter_labels(self): From 0a1b0caf88f5842e1e9639075b97320fc70cf3bc Mon Sep 17 00:00:00 2001 From: "Stefan K. Seritan" Date: Thu, 12 Sep 2024 13:33:11 -0700 Subject: [PATCH 03/41] Allow SIGINT set to be skipped via env variable. This is relevant when attempting to use Dask outside of pyGSTi, where signals cannot be set in the workers. Setting the PYGSTI_NO_CUSTOMLM_SIGINT env variable now skips this behavior. --- pygsti/optimize/customlm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pygsti/optimize/customlm.py b/pygsti/optimize/customlm.py index 89b749a3a..ce5b24ee1 100644 --- a/pygsti/optimize/customlm.py +++ b/pygsti/optimize/customlm.py @@ -28,7 +28,7 @@ #Make sure SIGINT will generate a KeyboardInterrupt (even if we're launched in the background) #This may be problematic for multithreaded parallelism above pyGSTi, e.g. Dask, #so this can be turned off by setting the PYGSTI_NO_CUSTOMLM_SIGINT environment variable -if 'PYGSTI_NO_CUSTOMLM_SIGINT' not in _os.environ: +if 'PYGSTI_NO_CUSTOMLM_SIGINT' in _os.environ: _signal.signal(_signal.SIGINT, _signal.default_int_handler) #constants From dd7bfe2e7b12b9c15eeb6db532b81901c92577c9 Mon Sep 17 00:00:00 2001 From: "Stefan K. Seritan" Date: Thu, 12 Sep 2024 13:34:10 -0700 Subject: [PATCH 04/41] Logic bugfix --- pygsti/optimize/customlm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pygsti/optimize/customlm.py b/pygsti/optimize/customlm.py index ce5b24ee1..89b749a3a 100644 --- a/pygsti/optimize/customlm.py +++ b/pygsti/optimize/customlm.py @@ -28,7 +28,7 @@ #Make sure SIGINT will generate a KeyboardInterrupt (even if we're launched in the background) #This may be problematic for multithreaded parallelism above pyGSTi, e.g. Dask, #so this can be turned off by setting the PYGSTI_NO_CUSTOMLM_SIGINT environment variable -if 'PYGSTI_NO_CUSTOMLM_SIGINT' in _os.environ: +if 'PYGSTI_NO_CUSTOMLM_SIGINT' not in _os.environ: _signal.signal(_signal.SIGINT, _signal.default_int_handler) #constants From bbbb471630071696e11278b7e72e9a8bc5723219 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Wed, 18 Sep 2024 21:02:40 -0600 Subject: [PATCH 05/41] Fix a bug with parameter label management for interposers A bug in the parameter label handling code was causing parameter labels to explode exponentially in size when _rebuild_paramvec was caused, leading to major memory issues. This now makes it so that the value of _paramlbls is fixed to that of the underlying operations and adds a new version of the parameter_labels property that goes through the interposer (making the interposer labels something generated on demand). Also add a threshold for coefficients printing in the LinearInterposer to avoid obnoxious labels. --- pygsti/models/model.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pygsti/models/model.py b/pygsti/models/model.py index 1442537f8..4f0c5bd9c 100644 --- a/pygsti/models/model.py +++ b/pygsti/models/model.py @@ -490,6 +490,13 @@ def __setstate__(self, state_dict): ## Get/Set methods ########################################## + @property + def parameter_labels(self): + """ + A list of labels, usually of the form `(op_label, string_description)` describing this model's parameters. 
+ """ + return self._ops_paramlbls_to_model_paramlbls(self._paramlbls) + @property def sim(self): """ Forward simulator for this model """ From 9522fb5c9aed83340dd905037ccdfcd8938dca6a Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Wed, 22 May 2024 16:50:42 -0600 Subject: [PATCH 06/41] Initial implementation of caching for layout creation The creation of COPA layouts relies on a number of specialized circuit structures which require non-trivial time to construct. In the context of iterative GST estimation with nested circuit lists (i.e. the default) this results in unnecessarily repeat construction of these objects. This is an initial implementation of a caching scheme allowing for more efficient re-use of these circuit structures across iterations. --- pygsti/algorithms/core.py | 25 ++---- pygsti/circuits/circuit.py | 100 +++++++++++++++++++++ pygsti/forwardsims/matrixforwardsim.py | 2 - pygsti/layouts/matrixlayout.py | 98 +++++++++++--------- pygsti/models/model.py | 119 +++++-------------------- 5 files changed, 185 insertions(+), 159 deletions(-) diff --git a/pygsti/algorithms/core.py b/pygsti/algorithms/core.py index 61c3a2185..8f585d5fd 100644 --- a/pygsti/algorithms/core.py +++ b/pygsti/algorithms/core.py @@ -878,30 +878,19 @@ def _max_array_types(artypes_list): # get the maximum number of each array type #The ModelDatasetCircuitsStore printer.log('Precomputing CircuitOutcomeProbabilityArray layouts for each iteration.', 2) precomp_layouts = [] - #pre-compute a dictionary caching completed circuits for layout construction performance. - unique_circuits = list({ckt for circuit_list in circuit_lists for ckt in circuit_list}) - if isinstance(mdl.sim, (_fwdsims.MatrixForwardSimulator, _fwdsims.MapForwardSimulator)): - precomp_layout_circuit_cache = mdl.sim.create_copa_layout_circuit_cache(unique_circuits, mdl, dataset=dataset) + unique_circuits = {ckt for circuit_list in circuit_lists for ckt in circuit_list} + print(f'{len(unique_circuits)=}') + if isinstance(mdl.sim, _fwdsims.MatrixForwardSimulator): + precomp_layout_circuit_cache = _layouts.matrixlayout.create_matrix_copa_layout_circuit_cache(unique_circuits, mdl) else: precomp_layout_circuit_cache = None - + #print(completed_circuit_cache) for i, circuit_list in enumerate(circuit_lists): printer.log(f'Layout for iteration {i}', 2) precomp_layouts.append(mdl.sim.create_layout(circuit_list, dataset, resource_alloc, array_types, verbosity= printer - 1, - layout_creation_circuit_cache = precomp_layout_circuit_cache)) - - #precompute a cache of possible outcome counts for each circuits to accelerate MDC store creation - if isinstance(mdl, _models.model.OpModel): - if precomp_layout_circuit_cache is not None: #then grab the split circuits from there. 
- expanded_circuit_outcome_list = mdl.bulk_expand_instruments_and_separate_povm(unique_circuits, - completed_circuits= precomp_layout_circuit_cache['completed_circuits'].values()) - else: - expanded_circuit_outcome_list = mdl.bulk_expand_instruments_and_separate_povm(unique_circuits) - outcome_count_by_circuit_cache = {ckt: len(outcome_tup) for ckt,outcome_tup in zip(unique_circuits, expanded_circuit_outcome_list)} - else: - outcome_count_by_circuit_cache = {ckt: mdl.compute_num_outcomes(ckt) for ckt in unique_circuits} - + layout_creation_circuit_cache = precomp_layout_circuit_cache)) + with printer.progress_logging(1): for i in range(starting_index, len(circuit_lists)): circuitsToEstimate = circuit_lists[i] diff --git a/pygsti/circuits/circuit.py b/pygsti/circuits/circuit.py index 822fd0b65..3af80ec39 100644 --- a/pygsti/circuits/circuit.py +++ b/pygsti/circuits/circuit.py @@ -4443,6 +4443,106 @@ def done_editing(self): self._hashable_tup = self.tup self._hash = hash(self._hashable_tup) + def expand_instruments_and_separate_povm(self, model, observed_outcomes=None): + """ + Creates a dictionary of :class:`SeparatePOVMCircuit` objects from expanding the instruments of this circuit. + + Each key of the returned dictionary replaces the instruments in this circuit with a selection + of their members. (The size of the resulting dictionary is the product of the sizes of + each instrument appearing in this circuit when `observed_outcomes is None`). Keys are stored + as :class:`SeparatePOVMCircuit` objects so it's easy to keep track of which POVM outcomes (effects) + correspond to observed data. This function is, for the most part, used internally to process + a circuit before computing its outcome probabilities. + + Parameters + ---------- + model : Model + The model used to provide necessary details regarding the expansion, including: + + - default SPAM layers + - definitions of instrument-containing layers + - expansions of individual instruments and POVMs + + Returns + ------- + OrderedDict + A dict whose keys are :class:`SeparatePOVMCircuit` objects and whose + values are tuples of the outcome labels corresponding to this circuit, + one per POVM effect held in the key. + """ + complete_circuit = model.complete_circuit(self) + expanded_circuit_outcomes = _collections.OrderedDict() + povm_lbl = complete_circuit[-1] # "complete" circuits always end with a POVM label + circuit_without_povm = complete_circuit[0:len(complete_circuit) - 1] + + def create_tree(lst): + subs = _collections.OrderedDict() + for el in lst: + if len(el) > 0: + if el[0] not in subs: subs[el[0]] = [] + subs[el[0]].append(el[1:]) + return _collections.OrderedDict([(k, create_tree(sub_lst)) for k, sub_lst in subs.items()]) + + def add_expanded_circuit_outcomes(circuit, running_outcomes, ootree, start): + """ + """ + cir = circuit if start == 0 else circuit[start:] # for performance, avoid uneeded slicing + for k, layer_label in enumerate(cir, start=start): + components = layer_label.components + #instrument_inds = _np.nonzero([model._is_primitive_instrument_layer_lbl(component) + # for component in components])[0] # SLOWER than statement below + instrument_inds = _np.array([i for i, component in enumerate(components) + if model._is_primitive_instrument_layer_lbl(component)]) + if instrument_inds.size > 0: + # This layer contains at least one instrument => recurse with instrument(s) replaced with + # all combinations of their members. 
+ component_lookup = {i: comp for i, comp in enumerate(components)} + instrument_members = [model._member_labels_for_instrument(components[i]) + for i in instrument_inds] # also components of outcome labels + for selected_instrmt_members in _itertools.product(*instrument_members): + expanded_layer_lbl = component_lookup.copy() + expanded_layer_lbl.update({i: components[i] + "_" + sel + for i, sel in zip(instrument_inds, selected_instrmt_members)}) + expanded_layer_lbl = _Label([expanded_layer_lbl[i] for i in range(len(components))]) + + if ootree is not None: + new_ootree = ootree + for sel in selected_instrmt_members: + new_ootree = new_ootree.get(sel, {}) + if len(new_ootree) == 0: continue # no observed outcomes along this outcome-tree path + else: + new_ootree = None + + add_expanded_circuit_outcomes(circuit[0:k] + Circuit((expanded_layer_lbl,)) + circuit[k + 1:], + running_outcomes + selected_instrmt_members, new_ootree, k + 1) + break + + else: # no more instruments to process: `cir` contains no instruments => add an expanded circuit + assert(circuit not in expanded_circuit_outcomes) # shouldn't be possible to generate duplicates... + elabels = model._effect_labels_for_povm(povm_lbl) if (observed_outcomes is None) \ + else tuple(ootree.keys()) + outcomes = tuple((running_outcomes + (elabel,) for elabel in elabels)) + expanded_circuit_outcomes[SeparatePOVMCircuit(circuit, povm_lbl, elabels)] = outcomes + + ootree = create_tree(observed_outcomes) if observed_outcomes is not None else None # tree of observed outcomes + # e.g. [('0','00'), ('0','01'), ('1','10')] ==> {'0': {'00': {}, '01': {}}, '1': {'10': {}}} + + if model._has_instruments(): + add_expanded_circuit_outcomes(circuit_without_povm, (), ootree, start=0) + else: + # It may be helpful to cache the set of elabels for a POVM (maybe within the model?) because + # currently the call to _effect_labels_for_povm may be a bottleneck. It's needed, even when we have + # observed outcomes, because there may be some observed outcomes that aren't modeled (e.g. leakage states) + if observed_outcomes is None: + elabels = model._effect_labels_for_povm(povm_lbl) + else: + possible_lbls = set(model._effect_labels_for_povm(povm_lbl)) + elabels = tuple([oo for oo in ootree.keys() if oo in possible_lbls]) + outcomes = tuple(((elabel,) for elabel in elabels)) + expanded_circuit_outcomes[SeparatePOVMCircuit(circuit_without_povm, povm_lbl, elabels)] = outcomes + + return expanded_circuit_outcomes + class CompressedCircuit(object): """ A "compressed" Circuit that requires less disk space. diff --git a/pygsti/forwardsims/matrixforwardsim.py b/pygsti/forwardsims/matrixforwardsim.py index 2952ddef0..2a50f62ef 100644 --- a/pygsti/forwardsims/matrixforwardsim.py +++ b/pygsti/forwardsims/matrixforwardsim.py @@ -1061,8 +1061,6 @@ def create_layout(self, circuits, dataset=None, resource_alloc=None, array_types layout_creation_circuit_cache : dict, optional (default None) A precomputed dictionary serving as a cache for completed circuits. I.e. circuits with prep labels and POVM labels appended. - Along with other useful pre-computed circuit structures used in layout - creation. 
Returns ------- diff --git a/pygsti/layouts/matrixlayout.py b/pygsti/layouts/matrixlayout.py index bfff25a31..d24ddadc1 100644 --- a/pygsti/layouts/matrixlayout.py +++ b/pygsti/layouts/matrixlayout.py @@ -71,11 +71,7 @@ class _MatrixCOPALayoutAtom(_DistributableAtom): """ def __init__(self, unique_complete_circuits, unique_nospam_circuits, circuits_by_unique_nospam_circuits, - ds_circuits, group, helpful_scratch, model, unique_circuits, dataset=None, expanded_and_separated_circuit_cache=None, - double_expanded_nospam_circuits_cache = None): - - if expanded_and_separated_circuit_cache is None: - expanded_and_separated_circuit_cache = dict() + ds_circuits, group, helpful_scratch, model, dataset=None, expanded_and_separated_circuit_cache=None): #Note: group gives unique_nospam_circuits indices, which circuits_by_unique_nospam_circuits # turns into "unique complete circuit" indices, which the layout via it's to_unique can map @@ -85,13 +81,16 @@ def add_expanded_circuits(indices, add_to_this_dict): for i in indices: nospam_c = unique_nospam_circuits[i] for unique_i in circuits_by_unique_nospam_circuits[nospam_c]: # "unique" circuits: add SPAM to nospam_c - #the cache is indexed into using the (potentially) incomplete circuits - expc_outcomes = expanded_and_separated_circuit_cache.get(unique_circuits[unique_i], None) - if expc_outcomes is None: #fall back on original non-cache behavior. + if expanded_and_separated_circuit_cache is None: observed_outcomes = None if (dataset is None) else dataset[ds_circuits[unique_i]].unique_outcomes expc_outcomes = model.expand_instruments_and_separate_povm(unique_complete_circuits[unique_i], observed_outcomes) - #and add this new value to the cache. - expanded_and_separated_circuit_cache[unique_circuits[unique_i]] = expc_outcomes + #Note: unique_complete_circuits may have duplicates (they're only unique *pre*-completion) + else: + expc_outcomes = expanded_and_separated_circuit_cache.get(unique_complete_circuits[unique_i], None) + if expc_outcomes is None: #fall back on original non-cache behavior. 
+ observed_outcomes = None if (dataset is None) else dataset[ds_circuits[unique_i]].unique_outcomes + expc_outcomes = model.expand_instruments_and_separate_povm(unique_complete_circuits[unique_i], observed_outcomes) + for sep_povm_c, outcomes in expc_outcomes.items(): # for each expanded cir from unique_i-th circuit prep_lbl = sep_povm_c.circuit_without_povm[0] exp_nospam_c = sep_povm_c.circuit_without_povm[1:] # sep_povm_c *always* has prep lbl @@ -306,49 +305,41 @@ def __init__(self, circuits, model, dataset=None, num_sub_trees=None, num_tree_p ds_circuits = _lt.apply_aliases_to_circuits(unique_circuits, aliases) #extract subcaches from layout_creation_circuit_cache: - if layout_creation_circuit_cache is None: - layout_creation_circuit_cache = dict() - self.completed_circuit_cache = layout_creation_circuit_cache.get('completed_circuits', None) - self.split_circuit_cache = layout_creation_circuit_cache.get('split_circuits', None) - self.expanded_and_separated_circuits_cache = layout_creation_circuit_cache.get('expanded_and_separated_circuits', None) - self.expanded_subcircuits_no_spam_cache = layout_creation_circuit_cache.get('expanded_subcircuits_no_spam', None) + if layout_creation_circuit_cache is not None: + completed_circuit_cache = layout_creation_circuit_cache.get('completed_circuits', None) + split_circuit_cache = layout_creation_circuit_cache.get('split_circuits', None) + expanded_and_separated_circuits_cache = layout_creation_circuit_cache.get('expanded_and_separated_circuits', None) + else: + completed_circuit_cache = None + split_circuit_cache = None + expanded_and_separated_circuits_cache = None - if self.completed_circuit_cache is None: - unique_complete_circuits, split_unique_circuits = model.complete_circuits(unique_circuits, return_split=True) + if completed_circuit_cache is None: + unique_complete_circuits = [model.complete_circuit(c) for c in unique_circuits] else: unique_complete_circuits = [] for c in unique_circuits: - comp_ckt = self.completed_circuit_cache.get(c, None) - if comp_ckt is not None: + comp_ckt = completed_circuit_cache.get(c, None) + if completed_circuit_cache is not None: unique_complete_circuits.append(comp_ckt) else: unique_complete_circuits.append(model.complete_circuit(c)) - #Note: "unique" means a unique circuit *before* circuit-completion, so there could be duplicate # "unique circuits" after completion, e.g. "rho0Gx" and "Gx" could both complete to "rho0GxMdefault_0". - circuits_by_unique_nospam_circuits = dict() - if self.completed_circuit_cache is None: - for i, (_, nospam_c, _) in enumerate(split_unique_circuits): + circuits_by_unique_nospam_circuits = _collections.OrderedDict() + if completed_circuit_cache is None: + for i, c in enumerate(unique_complete_circuits): + _, nospam_c, _ = model.split_circuit(c) if nospam_c in circuits_by_unique_nospam_circuits: circuits_by_unique_nospam_circuits[nospam_c].append(i) else: circuits_by_unique_nospam_circuits[nospam_c] = [i] - #also create the split circuit cache at this point for future use. 
- if self.split_circuit_cache is None: - self.split_circuit_cache = {unique_ckt:split_ckt for unique_ckt, split_ckt in zip(unique_circuits, split_unique_circuits)} - else: - if self.split_circuit_cache is None: - self.split_circuit_cache = dict() - for i, (c_unique_complete, c_unique) in enumerate(zip(unique_complete_circuits, unique_circuits)): - split_ckt_tup = self.split_circuit_cache.get(c_unique, None) - nospam_c= split_ckt_tup[1] if split_ckt_tup is not None else None + for i, c in enumerate(unique_complete_circuits): + _, nospam_c, _ = split_circuit_cache.get(c, None) if nospam_c is None: - split_ckt_tup = model.split_circuit(c_unique_complete) - nospam_c= split_ckt_tup[1] - #also add this missing circuit to the cache for future use. - self.split_circuit_cache[c_unique] = split_ckt_tup + _, nospam_c, _ = model.split_circuit(c) if nospam_c in circuits_by_unique_nospam_circuits: circuits_by_unique_nospam_circuits[nospam_c].append(i) else: @@ -371,15 +362,40 @@ def _create_atom(args): group, helpful_scratch_group = args return _MatrixCOPALayoutAtom(unique_complete_circuits, unique_nospam_circuits, circuits_by_unique_nospam_circuits, ds_circuits, - group, helpful_scratch_group, model, - unique_circuits, dataset, - self.expanded_and_separated_circuits_cache, - self.expanded_subcircuits_no_spam_cache) + group, helpful_scratch_group, model, dataset, + expanded_and_separated_circuits_cache) super().__init__(circuits, unique_circuits, to_unique, unique_complete_circuits, _create_atom, list(zip(groups, helpful_scratch)), num_tree_processors, num_param_dimension_processors, param_dimensions, param_dimension_blk_sizes, resource_alloc, verbosity) + +def create_matrix_copa_layout_circuit_cache(circuits, model, dataset=None): + """ + Helper function for pre-computing/pre-processing circuits structures + used in matrix layout creation. + """ + cache = dict() + completed_circuits = {ckt: model.complete_circuit(ckt) for ckt in circuits} + cache['completed_circuits'] = completed_circuits + split_circuits = {ckt: model.split_circuit(ckt) for ckt in completed_circuits.values()} + cache['split_circuits'] = split_circuits + + expanded_circuit_cache = dict() + #There is some potential aliasing that happens in the init that I am not + #doing here, but I think 90+% of the time this ought to be fine. + if dataset is not None: + for ckt in completed_circuits.values(): + ds_row = dataset.get(ckt, None) + if ds_row is not None: + expanded_circuit_cache[ckt] = model.expand_instruments_and_separate_povm(ckt, ds_row.unique_outcomes) + else: + expanded_circuit_cache = {ckt: model.expand_instruments_and_separate_povm(ckt, None) + for ckt in completed_circuits.values()} + + cache['expanded_and_separated_circuits'] = expanded_circuit_cache + + return cache diff --git a/pygsti/models/model.py b/pygsti/models/model.py index 4f0c5bd9c..d4f38d3ec 100644 --- a/pygsti/models/model.py +++ b/pygsti/models/model.py @@ -1522,16 +1522,13 @@ def expand_instruments_and_separate_povm(self, circuit, observed_outcomes=None): Parameters ---------- - circuit : Circuit - The circuit to expand, using necessary details regarding the expansion from this model, including: + model : Model + The model used to provide necessary details regarding the expansion, including: - default SPAM layers - definitions of instrument-containing layers - expansions of individual instruments and POVMs - observed_outcomes : iterable, optional (default None) - If specified an iterable over the subset of outcomes empirically observed for this circuit. 
- Returns ------- OrderedDict @@ -1539,77 +1536,10 @@ def expand_instruments_and_separate_povm(self, circuit, observed_outcomes=None): values are tuples of the outcome labels corresponding to this circuit, one per POVM effect held in the key. """ - expanded_circuit_outcomes = self.bulk_expand_instruments_and_separate_povm([circuit], [observed_outcomes]) - return expanded_circuit_outcomes[0] - - def bulk_expand_instruments_and_separate_povm(self, circuits, observed_outcomes_list=None, split_circuits = None, - completed_circuits = None): - """ - Creates a list of dictionaries mapping from :class:`SeparatePOVMCircuit` - objects from expanding the instruments of this circuit. - - Each key of the returned dictionary replaces the instruments in this circuit with a selection - of their members. (The size of the resulting dictionary is the product of the sizes of - each instrument appearing in this circuit when `observed_outcomes is None`). Keys are stored - as :class:`SeparatePOVMCircuit` objects so it's easy to keep track of which POVM outcomes (effects) - correspond to observed data. This function is, for the most part, used internally to process - a circuit before computing its outcome probabilities. - - This function works similarly to expand_instruments_and_separate_povm, except it operates on - an entire list of circuits at once, and provides additional kwargs to accelerate computation. - - Parameters - ---------- - circuit : Circuit - The circuit to expand, using necessary details regarding the expansion from this model, including: - - - default SPAM layers - - definitions of instrument-containing layers - - expansions of individual instruments and POVMs - - observed_outcomes_list : list of iterables, optional (default None) - If specified a list of iterables over the subset of outcomes empirically observed for each circuit. - - split_circuits : list of tuples, optional (default None) - If specified, this is a list of tuples for each circuit corresponding to the splitting of - the circuit into the prep label, spam-free circuit, and povm label. This is the same format - produced by the :meth:split_circuit(s) method, and so this option can allow for accelerating this - method when that has previously been run. When using this kwarg only one of this or - the `complete_circuits` kwargs should be used. - - completed_circuits : list of Circuits, optional (default None) - If specified, this is a list of compeleted circuits with prep and povm labels included. - This is the format produced by the :meth:complete_circuit(s) method, and this can - be used to accelerate this method call when that has been previously run. Should not - be used in conjunction with `split_circuits`. - - Returns - ------- - list of OrderedDicts - A list of dictionaries whose keys are :class:`SeparatePOVMCircuit` objects and whose - values are tuples of the outcome labels corresponding to each circuit, - one per POVM effect held in the key. - """ - - assert(not (completed_circuits is not None and split_circuits is not None)), "Inclusion of non-trivial values"\ - +" for both `complete_circuits` and `split_circuits` is not supported. Please use only one of these two arguments." 
- - if split_circuits is not None: - povm_lbls = [split_ckt[2] for split_ckt in split_circuits] - circuits_without_povm = [(split_ckt[0],) + split_ckt[1] for split_ckt in split_circuits] - elif completed_circuits is not None: - povm_lbls = [comp_ckt[-1] for comp_ckt in completed_circuits] - circuits_without_povm = [comp_ckt[:-1] for comp_ckt in completed_circuits] - else: - completed_circuits = self.complete_circuits(circuits) - povm_lbls = [comp_ckt[-1] for comp_ckt in completed_circuits] - circuits_without_povm = [comp_ckt[:-1] for comp_ckt in completed_circuits] - - if observed_outcomes_list is None: - observed_outcomes_list = [None]*len(circuits) - - - expanded_circuit_outcomes_list = [_collections.OrderedDict() for _ in range(len(circuits))] + complete_circuit = self.complete_circuit(circuit) + expanded_circuit_outcomes = _collections.OrderedDict() + povm_lbl = complete_circuit[-1] # "complete" circuits always end with a POVM label + circuit_without_povm = complete_circuit[0:len(complete_circuit) - 1] def create_tree(lst): subs = _collections.OrderedDict() @@ -1660,31 +1590,24 @@ def add_expanded_circuit_outcomes(circuit, running_outcomes, ootree, start): outcomes = tuple((running_outcomes + (elabel,) for elabel in elabels)) expanded_circuit_outcomes[_SeparatePOVMCircuit(circuit, povm_lbl, elabels)] = outcomes - has_instruments = self._has_instruments() - unique_povm_labels = set(povm_lbls) - effect_label_dict = {povm_lbl: self._effect_labels_for_povm(povm_lbl) for povm_lbl in unique_povm_labels} + ootree = create_tree(observed_outcomes) if observed_outcomes is not None else None # tree of observed outcomes + # e.g. [('0','00'), ('0','01'), ('1','10')] ==> {'0': {'00': {}, '01': {}}, '1': {'10': {}}} - for povm_lbl, circuit_without_povm, expanded_circuit_outcomes, observed_outcomes in zip(povm_lbls, circuits_without_povm, - expanded_circuit_outcomes_list, - observed_outcomes_list): - ootree = create_tree(observed_outcomes) if observed_outcomes is not None else None # tree of observed outcomes - # e.g. [('0','00'), ('0','01'), ('1','10')] ==> {'0': {'00': {}, '01': {}}, '1': {'10': {}}} - - if has_instruments: - add_expanded_circuit_outcomes(circuit_without_povm, (), ootree, start=0) + if self._has_instruments(): + add_expanded_circuit_outcomes(circuit_without_povm, (), ootree, start=0) + else: + # It may be helpful to cache the set of elabels for a POVM (maybe within the model?) because + # currently the call to _effect_labels_for_povm may be a bottleneck. It's needed, even when we have + # observed outcomes, because there may be some observed outcomes that aren't modeled (e.g. leakage states) + if observed_outcomes is None: + elabels = self._effect_labels_for_povm(povm_lbl) else: - # It may be helpful to cache the set of elabels for a POVM (maybe within the model?) because - # currently the call to _effect_labels_for_povm may be a bottleneck. It's needed, even when we have - # observed outcomes, because there may be some observed outcomes that aren't modeled (e.g. 
leakage states) - if observed_outcomes is None: - elabels = effect_label_dict[povm_lbl] - else: - possible_lbls = set(effect_label_dict[povm_lbl]) - elabels = tuple([oo for oo in ootree.keys() if oo in possible_lbls]) - outcomes = tuple(((elabel,) for elabel in elabels)) - expanded_circuit_outcomes[_SeparatePOVMCircuit(circuit_without_povm, povm_lbl, elabels)] = outcomes + possible_lbls = set(self._effect_labels_for_povm(povm_lbl)) + elabels = tuple([oo for oo in ootree.keys() if oo in possible_lbls]) + outcomes = tuple(((elabel,) for elabel in elabels)) + expanded_circuit_outcomes[_SeparatePOVMCircuit(circuit_without_povm, povm_lbl, elabels)] = outcomes - return expanded_circuit_outcomes_list + return expanded_circuit_outcomes def complete_circuits(self, circuits, prep_lbl_to_prepend=None, povm_lbl_to_append=None, return_split = False): """ From 6383cc3a248d56a526240a3e55e224490c2516be Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Wed, 29 May 2024 17:15:06 -0600 Subject: [PATCH 07/41] Add caching for spam-free circuit expansion Cache the expanded SPAM-free circuits to reduce recomputing things unnecessarily. --- pygsti/algorithms/core.py | 2 +- pygsti/layouts/matrixlayout.py | 60 +++++++++++++++++++++++----------- 2 files changed, 42 insertions(+), 20 deletions(-) diff --git a/pygsti/algorithms/core.py b/pygsti/algorithms/core.py index 8f585d5fd..64835c926 100644 --- a/pygsti/algorithms/core.py +++ b/pygsti/algorithms/core.py @@ -878,9 +878,9 @@ def _max_array_types(artypes_list): # get the maximum number of each array type #The ModelDatasetCircuitsStore printer.log('Precomputing CircuitOutcomeProbabilityArray layouts for each iteration.', 2) precomp_layouts = [] + #pre-compute a dictionary caching completed circuits for layout construction performance. unique_circuits = {ckt for circuit_list in circuit_lists for ckt in circuit_list} - print(f'{len(unique_circuits)=}') if isinstance(mdl.sim, _fwdsims.MatrixForwardSimulator): precomp_layout_circuit_cache = _layouts.matrixlayout.create_matrix_copa_layout_circuit_cache(unique_circuits, mdl) else: diff --git a/pygsti/layouts/matrixlayout.py b/pygsti/layouts/matrixlayout.py index d24ddadc1..a42888b93 100644 --- a/pygsti/layouts/matrixlayout.py +++ b/pygsti/layouts/matrixlayout.py @@ -71,7 +71,8 @@ class _MatrixCOPALayoutAtom(_DistributableAtom): """ def __init__(self, unique_complete_circuits, unique_nospam_circuits, circuits_by_unique_nospam_circuits, - ds_circuits, group, helpful_scratch, model, dataset=None, expanded_and_separated_circuit_cache=None): + ds_circuits, group, helpful_scratch, model, dataset=None, expanded_and_separated_circuit_cache=None, + double_expanded_nospam_circuits_cache = None): #Note: group gives unique_nospam_circuits indices, which circuits_by_unique_nospam_circuits # turns into "unique complete circuit" indices, which the layout via it's to_unique can map @@ -119,24 +120,34 @@ def add_expanded_circuits(indices, add_to_this_dict): add_expanded_circuits(group, expanded_nospam_circuit_outcomes) expanded_nospam_circuits = {i:cir for i, cir in enumerate(expanded_nospam_circuit_outcomes.keys())} + #print(f'{expanded_nospam_circuits=}') + # add suggested scratch to the "final" elements as far as the tree creation is concerned # - this allows these scratch element to help balance the tree. 
if helpful_scratch: expanded_nospam_circuit_outcomes_plus_scratch = expanded_nospam_circuit_outcomes.copy() add_expanded_circuits(helpful_scratch, expanded_nospam_circuit_outcomes_plus_scratch) - expanded_nospam_circuits_plus_scratch = {i:cir for i, cir in enumerate(expanded_nospam_circuit_outcomes_plus_scratch.keys())} + expanded_nospam_circuits_plus_scratch = _collections.OrderedDict( + [(i, cir) for i, cir in enumerate(expanded_nospam_circuit_outcomes_plus_scratch.keys())]) else: expanded_nospam_circuits_plus_scratch = expanded_nospam_circuits.copy() - - if double_expanded_nospam_circuits_cache is None: - double_expanded_nospam_circuits_cache = dict() - double_expanded_nospam_circuits_plus_scratch = dict() - for i, cir in expanded_nospam_circuits_plus_scratch.items(): - # expand sub-circuits for a more efficient tree - double_expanded_ckt = double_expanded_nospam_circuits_cache.get(cir, None) - if double_expanded_ckt is None: #Fall back to standard behavior and do expansion. - double_expanded_ckt = cir.expand_subcircuits() - double_expanded_nospam_circuits_plus_scratch[i] = double_expanded_ckt + + double_expanded_nospam_circuits_plus_scratch = _collections.OrderedDict() + if double_expanded_nospam_circuits_cache is not None: + for i, cir in expanded_nospam_circuits_plus_scratch.items(): + # expand sub-circuits for a more efficient tree + double_expanded_ckt = double_expanded_nospam_circuits_cache.get(cir, None) + if double_expanded_ckt is None: #Fall back to standard behavior and do expansion. + double_expanded_nospam_circuits_plus_scratch[i] = cir.expand_subcircuits() + else: + double_expanded_nospam_circuits_plus_scratch[i] = double_expanded_ckt + else: + for i, cir in expanded_nospam_circuits_plus_scratch.items(): + # expand sub-circuits for a more efficient tree + double_expanded_nospam_circuits_plus_scratch[i] = cir.expand_subcircuits() + + #print(f'{double_expanded_nospam_circuits_plus_scratch=}') + #print(f'{double_expanded_nospam_circuits_plus_scratch == expanded_nospam_circuits}') self.tree = _EvalTree.create(double_expanded_nospam_circuits_plus_scratch) #print("Atom tree: %d circuits => tree of size %d" % (len(expanded_nospam_circuits), len(self.tree))) @@ -309,10 +320,12 @@ def __init__(self, circuits, model, dataset=None, num_sub_trees=None, num_tree_p completed_circuit_cache = layout_creation_circuit_cache.get('completed_circuits', None) split_circuit_cache = layout_creation_circuit_cache.get('split_circuits', None) expanded_and_separated_circuits_cache = layout_creation_circuit_cache.get('expanded_and_separated_circuits', None) + expanded_subcircuits_no_spam_cache = layout_creation_circuit_cache.get('expanded_subcircuits_no_spam', None) else: completed_circuit_cache = None split_circuit_cache = None expanded_and_separated_circuits_cache = None + expanded_subcircuits_no_spam_cache = None if completed_circuit_cache is None: unique_complete_circuits = [model.complete_circuit(c) for c in unique_circuits] @@ -363,7 +376,8 @@ def _create_atom(args): return _MatrixCOPALayoutAtom(unique_complete_circuits, unique_nospam_circuits, circuits_by_unique_nospam_circuits, ds_circuits, group, helpful_scratch_group, model, dataset, - expanded_and_separated_circuits_cache) + expanded_and_separated_circuits_cache, + expanded_subcircuits_no_spam_cache) super().__init__(circuits, unique_circuits, to_unique, unique_complete_circuits, _create_atom, list(zip(groups, helpful_scratch)), num_tree_processors, @@ -376,10 +390,10 @@ def create_matrix_copa_layout_circuit_cache(circuits, model, 
dataset=None): used in matrix layout creation. """ cache = dict() - completed_circuits = {ckt: model.complete_circuit(ckt) for ckt in circuits} - cache['completed_circuits'] = completed_circuits - split_circuits = {ckt: model.split_circuit(ckt) for ckt in completed_circuits.values()} - cache['split_circuits'] = split_circuits + completed_circuits, split_circuits = model.complete_circuits(circuits, return_split=True) + + cache['completed_circuits'] = {ckt: comp_ckt for ckt, comp_ckt in zip(circuits, completed_circuits)} + cache['split_circuits'] = {ckt: split_ckt for ckt, split_ckt in zip(cache['completed_circuits'].values(), split_circuits)} expanded_circuit_cache = dict() #There is some potential aliasing that happens in the init that I am not @@ -391,10 +405,18 @@ def create_matrix_copa_layout_circuit_cache(circuits, model, dataset=None): expanded_circuit_cache[ckt] = model.expand_instruments_and_separate_povm(ckt, ds_row.unique_outcomes) else: expanded_circuit_cache = {ckt: model.expand_instruments_and_separate_povm(ckt, None) - for ckt in completed_circuits.values()} + for ckt in cache['completed_circuits'].values()} cache['expanded_and_separated_circuits'] = expanded_circuit_cache - + + expanded_subcircuits_no_spam_cache = dict() + for expc_outcomes in cache['expanded_and_separated_circuits'].values(): + for sep_povm_c, _ in expc_outcomes.items(): # for each expanded cir from unique_i-th circuit + exp_nospam_c = sep_povm_c.circuit_without_povm[1:] + expanded_subcircuits_no_spam_cache[exp_nospam_c] = exp_nospam_c.expand_subcircuits() + + cache['expanded_subcircuits_no_spam'] = expanded_subcircuits_no_spam_cache + return cache From 093247b50810bb006e8b40a7619f8b3e5993fcd0 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Wed, 29 May 2024 23:47:58 -0600 Subject: [PATCH 08/41] New method for doing bulk intrument/effect expansion Adds a new method to OpModel that allows for doing instrument expansion and povm expansion in bulk, speeding things up be avoiding recomputation of shared quantities. Also adds a pipeline for re-using completed or split circuits (as produced by the related OpModel methods) for more efficient re-use of done work. --- pygsti/layouts/matrixlayout.py | 14 +-- pygsti/models/model.py | 151 ++++++++++++++++++++++++++++++++- 2 files changed, 158 insertions(+), 7 deletions(-) diff --git a/pygsti/layouts/matrixlayout.py b/pygsti/layouts/matrixlayout.py index a42888b93..44fe49e65 100644 --- a/pygsti/layouts/matrixlayout.py +++ b/pygsti/layouts/matrixlayout.py @@ -395,18 +395,22 @@ def create_matrix_copa_layout_circuit_cache(circuits, model, dataset=None): cache['completed_circuits'] = {ckt: comp_ckt for ckt, comp_ckt in zip(circuits, completed_circuits)} cache['split_circuits'] = {ckt: split_ckt for ckt, split_ckt in zip(cache['completed_circuits'].values(), split_circuits)} - expanded_circuit_cache = dict() #There is some potential aliasing that happens in the init that I am not #doing here, but I think 90+% of the time this ought to be fine. 
if dataset is not None: + unique_outcomes_list = [] for ckt in completed_circuits.values(): ds_row = dataset.get(ckt, None) - if ds_row is not None: - expanded_circuit_cache[ckt] = model.expand_instruments_and_separate_povm(ckt, ds_row.unique_outcomes) + unique_outcomes_list.append(ds_row.unique_outcomes if ds_row is not None else None) else: - expanded_circuit_cache = {ckt: model.expand_instruments_and_separate_povm(ckt, None) - for ckt in cache['completed_circuits'].values()} + unique_outcomes_list = [None]*len(circuits) + expanded_circuit_outcome_list = model.bulk_expand_instruments_and_separate_povm(circuits, + observed_outcomes_list = unique_outcomes_list, + split_circuits = split_circuits) + + expanded_circuit_cache = {ckt: expanded_ckt for ckt,expanded_ckt in zip(cache['completed_circuits'].values(), expanded_circuit_outcome_list)} + cache['expanded_and_separated_circuits'] = expanded_circuit_cache expanded_subcircuits_no_spam_cache = dict() diff --git a/pygsti/models/model.py b/pygsti/models/model.py index d4f38d3ec..3d986546d 100644 --- a/pygsti/models/model.py +++ b/pygsti/models/model.py @@ -1522,13 +1522,16 @@ def expand_instruments_and_separate_povm(self, circuit, observed_outcomes=None): Parameters ---------- - model : Model - The model used to provide necessary details regarding the expansion, including: + circuit : Circuit + The circuit to expand, using necessary details regarding the expansion from this model, including: - default SPAM layers - definitions of instrument-containing layers - expansions of individual instruments and POVMs + observed_outcomes : iterable, optional (default None) + If specified an iterable over the subset of outcomes empirically observed for this circuit. + Returns ------- OrderedDict @@ -1608,6 +1611,150 @@ def add_expanded_circuit_outcomes(circuit, running_outcomes, ootree, start): expanded_circuit_outcomes[_SeparatePOVMCircuit(circuit_without_povm, povm_lbl, elabels)] = outcomes return expanded_circuit_outcomes + + def bulk_expand_instruments_and_separate_povm(self, circuits, observed_outcomes_list=None, split_circuits = None, + completed_circuits = None): + """ + Creates a list of dictionaries mapping from :class:`SeparatePOVMCircuit` + objects from expanding the instruments of this circuit. + + Each key of the returned dictionary replaces the instruments in this circuit with a selection + of their members. (The size of the resulting dictionary is the product of the sizes of + each instrument appearing in this circuit when `observed_outcomes is None`). Keys are stored + as :class:`SeparatePOVMCircuit` objects so it's easy to keep track of which POVM outcomes (effects) + correspond to observed data. This function is, for the most part, used internally to process + a circuit before computing its outcome probabilities. + + This function works similarly to expand_instruments_and_separate_povm, except it operates on + an entire list of circuits at once, and provides additional kwargs to accelerate computation. + + Parameters + ---------- + circuit : Circuit + The circuit to expand, using necessary details regarding the expansion from this model, including: + + - default SPAM layers + - definitions of instrument-containing layers + - expansions of individual instruments and POVMs + + observed_outcomes_list : list of iterables, optional (default None) + If specified a list of iterables over the subset of outcomes empirically observed for each circuit. 
+ + split_circuits : list of tuples, optional (default None) + If specified, this is a list of tuples for each circuit corresponding to the splitting of + the circuit into the prep label, spam-free circuit, and povm label. This is the same format + produced by the :meth:split_circuit(s) method, and so this option can allow for accelerating this + method when that has previously been run. When using this kwarg only one of this or + the `complete_circuits` kwargs should be used. + + complete_circuits : list of Circuits, optional (default None) + If specified, this is a list of compeleted circuits with prep and povm labels included. + This is the format produced by the :meth:complete_circuit(s) method, and this can + be used to accelerate this method call when that has been previously run. Should not + be used in conjunction with `split_circuits`. + + Returns + ------- + OrderedDict + A dict whose keys are :class:`SeparatePOVMCircuit` objects and whose + values are tuples of the outcome labels corresponding to this circuit, + one per POVM effect held in the key. + """ + + assert(not (completed_circuits is not None and split_circuits is not None)), "Inclusion of non-trivial values"\ + +" for both `complete_circuits` and `split_circuits` is not supported. Please use only one of these two arguments." + + if split_circuits is not None: + povm_lbls = [split_ckt[2] for split_ckt in split_circuits] + circuits_without_povm = [(split_ckt[0],) + split_ckt[1] for split_ckt in split_circuits] + elif completed_circuits is not None: + povm_lbls = [comp_ckt[-1] for comp_ckt in completed_circuits] + circuits_without_povm = [comp_ckt[:-1] for comp_ckt in completed_circuits] + else: + completed_circuits = self.complete_circuits(circuits) + povm_lbls = [comp_ckt[-1] for comp_ckt in completed_circuits] + circuits_without_povm = [comp_ckt[:-1] for comp_ckt in completed_circuits] + + if observed_outcomes_list is None: + observed_outcomes_list = [None]*len(circuits) + + + expanded_circuit_outcomes_list = [_collections.OrderedDict() for _ in range(len(circuits))] + + def create_tree(lst): + subs = _collections.OrderedDict() + for el in lst: + if len(el) > 0: + if el[0] not in subs: subs[el[0]] = [] + subs[el[0]].append(el[1:]) + return _collections.OrderedDict([(k, create_tree(sub_lst)) for k, sub_lst in subs.items()]) + + def add_expanded_circuit_outcomes(circuit, running_outcomes, ootree, start): + """ + """ + cir = circuit if start == 0 else circuit[start:] # for performance, avoid uneeded slicing + for k, layer_label in enumerate(cir, start=start): + components = layer_label.components + #instrument_inds = _np.nonzero([model._is_primitive_instrument_layer_lbl(component) + # for component in components])[0] # SLOWER than statement below + instrument_inds = _np.array([i for i, component in enumerate(components) + if self._is_primitive_instrument_layer_lbl(component)]) + if instrument_inds.size > 0: + # This layer contains at least one instrument => recurse with instrument(s) replaced with + # all combinations of their members. 
+ component_lookup = {i: comp for i, comp in enumerate(components)} + instrument_members = [self._member_labels_for_instrument(components[i]) + for i in instrument_inds] # also components of outcome labels + for selected_instrmt_members in _itertools.product(*instrument_members): + expanded_layer_lbl = component_lookup.copy() + expanded_layer_lbl.update({i: components[i] + "_" + sel + for i, sel in zip(instrument_inds, selected_instrmt_members)}) + expanded_layer_lbl = _Label([expanded_layer_lbl[i] for i in range(len(components))]) + + if ootree is not None: + new_ootree = ootree + for sel in selected_instrmt_members: + new_ootree = new_ootree.get(sel, {}) + if len(new_ootree) == 0: continue # no observed outcomes along this outcome-tree path + else: + new_ootree = None + + add_expanded_circuit_outcomes(circuit[0:k] + _Circuit((expanded_layer_lbl,)) + circuit[k + 1:], + running_outcomes + selected_instrmt_members, new_ootree, k + 1) + break + + else: # no more instruments to process: `cir` contains no instruments => add an expanded circuit + assert(circuit not in expanded_circuit_outcomes) # shouldn't be possible to generate duplicates... + elabels = self._effect_labels_for_povm(povm_lbl) if (observed_outcomes is None) \ + else tuple(ootree.keys()) + outcomes = tuple((running_outcomes + (elabel,) for elabel in elabels)) + expanded_circuit_outcomes[_SeparatePOVMCircuit(circuit, povm_lbl, elabels)] = outcomes + + has_instruments = self._has_instruments() + unique_povm_labels = set(povm_lbls) + effect_label_dict = {povm_lbl: self._effect_labels_for_povm(povm_lbl) for povm_lbl in unique_povm_labels} + + for povm_lbl, circuit_without_povm, expanded_circuit_outcomes, observed_outcomes in zip(povm_lbls, circuits_without_povm, + expanded_circuit_outcomes_list, + observed_outcomes_list): + ootree = create_tree(observed_outcomes) if observed_outcomes is not None else None # tree of observed outcomes + # e.g. [('0','00'), ('0','01'), ('1','10')] ==> {'0': {'00': {}, '01': {}}, '1': {'10': {}}} + + if has_instruments: + add_expanded_circuit_outcomes(circuit_without_povm, (), ootree, start=0) + else: + # It may be helpful to cache the set of elabels for a POVM (maybe within the model?) because + # currently the call to _effect_labels_for_povm may be a bottleneck. It's needed, even when we have + # observed outcomes, because there may be some observed outcomes that aren't modeled (e.g. leakage states) + if observed_outcomes is None: + elabels = effect_label_dict[povm_lbl] + else: + possible_lbls = set(effect_label_dict[povm_lbl]) + elabels = tuple([oo for oo in ootree.keys() if oo in possible_lbls]) + outcomes = tuple(((elabel,) for elabel in elabels)) + expanded_circuit_outcomes[_SeparatePOVMCircuit(circuit_without_povm, povm_lbl, elabels)] = outcomes + + return expanded_circuit_outcomes_list def complete_circuits(self, circuits, prep_lbl_to_prepend=None, povm_lbl_to_append=None, return_split = False): """ From 9d578845d4d0de7fa6fab3933b3b6673d409d01a Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Thu, 30 May 2024 12:33:37 -0600 Subject: [PATCH 09/41] Minor COPA Layout __init__ tweaks Some minor performance oriented tweaks to the init for COPA layouts. 
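Editorial aside (not part of the patch series): the OrderedDict-to-dict swaps in this patch and the next rely on the fact that, since Python 3.7, plain dicts are guaranteed to preserve insertion order, so the only thing the layout code needs from OrderedDict (stable iteration order) is retained. A minimal illustration of that guarantee, using throwaway keys:

# Illustrative only -- not part of the patch.
# Plain dicts preserve insertion order (language guarantee since Python 3.7),
# so they can stand in for collections.OrderedDict when only iteration order
# (not move_to_end or order-sensitive equality) is needed.
from collections import OrderedDict

plain = {}
ordered = OrderedDict()
for i, key in enumerate(['rho0', 'Gx', 'Mdefault']):
    plain[key] = i
    ordered[key] = i

assert list(plain.keys()) == list(ordered.keys())  # identical iteration order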
--- pygsti/layouts/copalayout.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pygsti/layouts/copalayout.py b/pygsti/layouts/copalayout.py index 34e907d8f..257babc2e 100644 --- a/pygsti/layouts/copalayout.py +++ b/pygsti/layouts/copalayout.py @@ -205,8 +205,8 @@ def __init__(self, circuits, unique_circuits, to_unique, elindex_outcome_tuples, assert(len(indices) == self._size), \ "Inconsistency: %d distinct indices but max index + 1 is %d!" % (len(indices), self._size) - self._outcomes = dict() - self._element_indices = dict() + self._outcomes = dict() #_collections.OrderedDict() + self._element_indices = dict() #_collections.OrderedDict() sort_idx_func = lambda x: x[0] for i_unique, tuples in elindex_outcome_tuples.items(): sorted_tuples = sorted(tuples, key=sort_idx_func) # sort by element index From aa9519d6441675a59731f3bc013b418a31629177 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Thu, 30 May 2024 13:01:33 -0600 Subject: [PATCH 10/41] Refactor some OrderedDicts into regular ones Refactor some of the ordered dictionaries in matrix layout creation into regular ones. --- pygsti/layouts/copalayout.py | 4 ++-- pygsti/layouts/matrixlayout.py | 9 ++------- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/pygsti/layouts/copalayout.py b/pygsti/layouts/copalayout.py index 257babc2e..34e907d8f 100644 --- a/pygsti/layouts/copalayout.py +++ b/pygsti/layouts/copalayout.py @@ -205,8 +205,8 @@ def __init__(self, circuits, unique_circuits, to_unique, elindex_outcome_tuples, assert(len(indices) == self._size), \ "Inconsistency: %d distinct indices but max index + 1 is %d!" % (len(indices), self._size) - self._outcomes = dict() #_collections.OrderedDict() - self._element_indices = dict() #_collections.OrderedDict() + self._outcomes = dict() + self._element_indices = dict() sort_idx_func = lambda x: x[0] for i_unique, tuples in elindex_outcome_tuples.items(): sorted_tuples = sorted(tuples, key=sort_idx_func) # sort by element index diff --git a/pygsti/layouts/matrixlayout.py b/pygsti/layouts/matrixlayout.py index 44fe49e65..8e35e5c1a 100644 --- a/pygsti/layouts/matrixlayout.py +++ b/pygsti/layouts/matrixlayout.py @@ -120,19 +120,16 @@ def add_expanded_circuits(indices, add_to_this_dict): add_expanded_circuits(group, expanded_nospam_circuit_outcomes) expanded_nospam_circuits = {i:cir for i, cir in enumerate(expanded_nospam_circuit_outcomes.keys())} - #print(f'{expanded_nospam_circuits=}') - # add suggested scratch to the "final" elements as far as the tree creation is concerned # - this allows these scratch element to help balance the tree. 
if helpful_scratch: expanded_nospam_circuit_outcomes_plus_scratch = expanded_nospam_circuit_outcomes.copy() add_expanded_circuits(helpful_scratch, expanded_nospam_circuit_outcomes_plus_scratch) - expanded_nospam_circuits_plus_scratch = _collections.OrderedDict( - [(i, cir) for i, cir in enumerate(expanded_nospam_circuit_outcomes_plus_scratch.keys())]) + expanded_nospam_circuits_plus_scratch = {i:cir for i, cir in enumerate(expanded_nospam_circuit_outcomes_plus_scratch.keys())} else: expanded_nospam_circuits_plus_scratch = expanded_nospam_circuits.copy() - double_expanded_nospam_circuits_plus_scratch = _collections.OrderedDict() + double_expanded_nospam_circuits_plus_scratch = dict() if double_expanded_nospam_circuits_cache is not None: for i, cir in expanded_nospam_circuits_plus_scratch.items(): # expand sub-circuits for a more efficient tree @@ -146,8 +143,6 @@ def add_expanded_circuits(indices, add_to_this_dict): # expand sub-circuits for a more efficient tree double_expanded_nospam_circuits_plus_scratch[i] = cir.expand_subcircuits() - #print(f'{double_expanded_nospam_circuits_plus_scratch=}') - #print(f'{double_expanded_nospam_circuits_plus_scratch == expanded_nospam_circuits}') self.tree = _EvalTree.create(double_expanded_nospam_circuits_plus_scratch) #print("Atom tree: %d circuits => tree of size %d" % (len(expanded_nospam_circuits), len(self.tree))) From 957facd94d8b0bfbd626c2a8212601fe72ca70e2 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Fri, 31 May 2024 18:32:55 -0600 Subject: [PATCH 11/41] Start the process of adding caching to MDC store creation Start adding infrastructure for caching things used in MDC store creation and for plumbing in stuff from layout creation. --- pygsti/algorithms/core.py | 15 ++++++- pygsti/layouts/matrixlayout.py | 61 +++++++++++++++++------------ pygsti/models/model.py | 8 ++-- pygsti/objectivefns/objectivefns.py | 38 ++++++------------ 4 files changed, 65 insertions(+), 57 deletions(-) diff --git a/pygsti/algorithms/core.py b/pygsti/algorithms/core.py index 64835c926..fc367dfc1 100644 --- a/pygsti/algorithms/core.py +++ b/pygsti/algorithms/core.py @@ -885,12 +885,25 @@ def _max_array_types(artypes_list): # get the maximum number of each array type precomp_layout_circuit_cache = _layouts.matrixlayout.create_matrix_copa_layout_circuit_cache(unique_circuits, mdl) else: precomp_layout_circuit_cache = None - #print(completed_circuit_cache) + for i, circuit_list in enumerate(circuit_lists): printer.log(f'Layout for iteration {i}', 2) precomp_layouts.append(mdl.sim.create_layout(circuit_list, dataset, resource_alloc, array_types, verbosity= printer - 1, layout_creation_circuit_cache = precomp_layout_circuit_cache)) + #precompute a cache of possible outcome counts for each circuits to accelerate MDC store creation + if isinstance(mdl, _models.model.OpModel): + if precomp_layout_circuit_cache is not None: #then grab the split circuits from there. 
+ expanded_circuit_outcome_list = mdl.bulk_expand_instruments_and_separate_povm(unique_circuits, + split_circuits = precomp_layout_circuit_cache['split_circuits']) + outcome_count_by_circuit_cache = {ckt: len(outcome_tup) for ckt,outcome_tup in zip(unique_circuits, expanded_circuit_outcome_list)} + else: + expanded_circuit_outcome_list = mdl.bulk_expand_instruments_and_separate_povm(unique_circuits) + outcome_count_by_circuit_cache = {ckt: len(outcome_tup) for ckt,outcome_tup in zip(unique_circuits, expanded_circuit_outcome_list)} + else: + outcome_count_by_circuit_cache = {ckt: mdl.compute_num_outcomes(ckt) for ckt in unique_circuits} + + with printer.progress_logging(1): for i in range(starting_index, len(circuit_lists)): circuitsToEstimate = circuit_lists[i] diff --git a/pygsti/layouts/matrixlayout.py b/pygsti/layouts/matrixlayout.py index 8e35e5c1a..fadc472b8 100644 --- a/pygsti/layouts/matrixlayout.py +++ b/pygsti/layouts/matrixlayout.py @@ -71,7 +71,7 @@ class _MatrixCOPALayoutAtom(_DistributableAtom): """ def __init__(self, unique_complete_circuits, unique_nospam_circuits, circuits_by_unique_nospam_circuits, - ds_circuits, group, helpful_scratch, model, dataset=None, expanded_and_separated_circuit_cache=None, + ds_circuits, group, helpful_scratch, model, unique_circuits, dataset=None, expanded_and_separated_circuit_cache=None, double_expanded_nospam_circuits_cache = None): #Note: group gives unique_nospam_circuits indices, which circuits_by_unique_nospam_circuits @@ -87,11 +87,13 @@ def add_expanded_circuits(indices, add_to_this_dict): expc_outcomes = model.expand_instruments_and_separate_povm(unique_complete_circuits[unique_i], observed_outcomes) #Note: unique_complete_circuits may have duplicates (they're only unique *pre*-completion) else: - expc_outcomes = expanded_and_separated_circuit_cache.get(unique_complete_circuits[unique_i], None) + #the cache is indexed into using the (potentially) incomplete circuits + expc_outcomes = expanded_and_separated_circuit_cache.get(unique_circuits[unique_i], None) if expc_outcomes is None: #fall back on original non-cache behavior. observed_outcomes = None if (dataset is None) else dataset[ds_circuits[unique_i]].unique_outcomes expc_outcomes = model.expand_instruments_and_separate_povm(unique_complete_circuits[unique_i], observed_outcomes) - + #and add this new value to the cache. 
+ expanded_and_separated_circuit_cache[unique_circuits[unique_i]] = expc_outcomes for sep_povm_c, outcomes in expc_outcomes.items(): # for each expanded cir from unique_i-th circuit prep_lbl = sep_povm_c.circuit_without_povm[0] exp_nospam_c = sep_povm_c.circuit_without_povm[1:] # sep_povm_c *always* has prep lbl @@ -312,23 +314,23 @@ def __init__(self, circuits, model, dataset=None, num_sub_trees=None, num_tree_p #extract subcaches from layout_creation_circuit_cache: if layout_creation_circuit_cache is not None: - completed_circuit_cache = layout_creation_circuit_cache.get('completed_circuits', None) - split_circuit_cache = layout_creation_circuit_cache.get('split_circuits', None) - expanded_and_separated_circuits_cache = layout_creation_circuit_cache.get('expanded_and_separated_circuits', None) - expanded_subcircuits_no_spam_cache = layout_creation_circuit_cache.get('expanded_subcircuits_no_spam', None) + self.completed_circuit_cache = layout_creation_circuit_cache.get('completed_circuits', None) + self.split_circuit_cache = layout_creation_circuit_cache.get('split_circuits', None) + self.expanded_and_separated_circuits_cache = layout_creation_circuit_cache.get('expanded_and_separated_circuits', None) + self.expanded_subcircuits_no_spam_cache = layout_creation_circuit_cache.get('expanded_subcircuits_no_spam', None) else: - completed_circuit_cache = None - split_circuit_cache = None - expanded_and_separated_circuits_cache = None - expanded_subcircuits_no_spam_cache = None + self.completed_circuit_cache = None + self.split_circuit_cache = None + self.expanded_and_separated_circuits_cache = None + self.expanded_subcircuits_no_spam_cache = None - if completed_circuit_cache is None: - unique_complete_circuits = [model.complete_circuit(c) for c in unique_circuits] + if self.completed_circuit_cache is None: + unique_complete_circuits, split_unique_circuits = model.complete_circuits(unique_circuits, return_split=True) else: unique_complete_circuits = [] for c in unique_circuits: - comp_ckt = completed_circuit_cache.get(c, None) - if completed_circuit_cache is not None: + comp_ckt = self.completed_circuit_cache.get(c, None) + if comp_ckt is not None: unique_complete_circuits.append(comp_ckt) else: unique_complete_circuits.append(model.complete_circuit(c)) @@ -336,18 +338,24 @@ def __init__(self, circuits, model, dataset=None, num_sub_trees=None, num_tree_p # "unique circuits" after completion, e.g. "rho0Gx" and "Gx" could both complete to "rho0GxMdefault_0". circuits_by_unique_nospam_circuits = _collections.OrderedDict() - if completed_circuit_cache is None: - for i, c in enumerate(unique_complete_circuits): - _, nospam_c, _ = model.split_circuit(c) + if self.completed_circuit_cache is None: + for i, (_, nospam_c, _) in enumerate(split_unique_circuits): if nospam_c in circuits_by_unique_nospam_circuits: circuits_by_unique_nospam_circuits[nospam_c].append(i) else: circuits_by_unique_nospam_circuits[nospam_c] = [i] + #also create the split circuit cache at this point for future use. 
+ self.split_circuit_cache = {unique_ckt:split_ckt for unique_ckt, split_ckt in zip(unique_circuits, split_unique_circuits)} + else: - for i, c in enumerate(unique_complete_circuits): - _, nospam_c, _ = split_circuit_cache.get(c, None) + for i, (c_unique_complete, c_unique) in enumerate(zip(unique_complete_circuits, unique_circuits)): + split_ckt_tup = self.split_circuit_cache.get(c_unique, None) + nospam_c= split_ckt_tup[1] if nospam_c is None: - _, nospam_c, _ = model.split_circuit(c) + split_ckt_tup = model.split_circuit(c_unique_complete) + nospam_c= split_ckt_tup[1] + #also add this missing circuit to the cache for future use. + self.split_circuit_cache[c_unique] = split_ckt_tup if nospam_c in circuits_by_unique_nospam_circuits: circuits_by_unique_nospam_circuits[nospam_c].append(i) else: @@ -370,9 +378,10 @@ def _create_atom(args): group, helpful_scratch_group = args return _MatrixCOPALayoutAtom(unique_complete_circuits, unique_nospam_circuits, circuits_by_unique_nospam_circuits, ds_circuits, - group, helpful_scratch_group, model, dataset, - expanded_and_separated_circuits_cache, - expanded_subcircuits_no_spam_cache) + group, helpful_scratch_group, model, + unique_circuits, dataset, + self.expanded_and_separated_circuits_cache, + self.expanded_subcircuits_no_spam_cache) super().__init__(circuits, unique_circuits, to_unique, unique_complete_circuits, _create_atom, list(zip(groups, helpful_scratch)), num_tree_processors, @@ -388,7 +397,7 @@ def create_matrix_copa_layout_circuit_cache(circuits, model, dataset=None): completed_circuits, split_circuits = model.complete_circuits(circuits, return_split=True) cache['completed_circuits'] = {ckt: comp_ckt for ckt, comp_ckt in zip(circuits, completed_circuits)} - cache['split_circuits'] = {ckt: split_ckt for ckt, split_ckt in zip(cache['completed_circuits'].values(), split_circuits)} + cache['split_circuits'] = {ckt: split_ckt for ckt, split_ckt in zip(circuits, split_circuits)} #There is some potential aliasing that happens in the init that I am not #doing here, but I think 90+% of the time this ought to be fine. @@ -404,7 +413,7 @@ def create_matrix_copa_layout_circuit_cache(circuits, model, dataset=None): observed_outcomes_list = unique_outcomes_list, split_circuits = split_circuits) - expanded_circuit_cache = {ckt: expanded_ckt for ckt,expanded_ckt in zip(cache['completed_circuits'].values(), expanded_circuit_outcome_list)} + expanded_circuit_cache = {ckt: expanded_ckt for ckt,expanded_ckt in zip(circuits, expanded_circuit_outcome_list)} cache['expanded_and_separated_circuits'] = expanded_circuit_cache diff --git a/pygsti/models/model.py b/pygsti/models/model.py index 3d986546d..2ebdc69ba 100644 --- a/pygsti/models/model.py +++ b/pygsti/models/model.py @@ -1647,7 +1647,7 @@ def bulk_expand_instruments_and_separate_povm(self, circuits, observed_outcomes_ method when that has previously been run. When using this kwarg only one of this or the `complete_circuits` kwargs should be used. - complete_circuits : list of Circuits, optional (default None) + completed_circuits : list of Circuits, optional (default None) If specified, this is a list of compeleted circuits with prep and povm labels included. This is the format produced by the :meth:complete_circuit(s) method, and this can be used to accelerate this method call when that has been previously run. 
Should not @@ -1655,9 +1655,9 @@ def bulk_expand_instruments_and_separate_povm(self, circuits, observed_outcomes_ Returns ------- - OrderedDict - A dict whose keys are :class:`SeparatePOVMCircuit` objects and whose - values are tuples of the outcome labels corresponding to this circuit, + list of OrderedDicts + A list of dictionaries whose keys are :class:`SeparatePOVMCircuit` objects and whose + values are tuples of the outcome labels corresponding to each circuit, one per POVM effect held in the key. """ diff --git a/pygsti/objectivefns/objectivefns.py b/pygsti/objectivefns/objectivefns.py index f2a19b9f6..e07e702dd 100644 --- a/pygsti/objectivefns/objectivefns.py +++ b/pygsti/objectivefns/objectivefns.py @@ -842,7 +842,7 @@ class ModelDatasetCircuitsStore(object): point. """ def __init__(self, model, dataset, circuits=None, resource_alloc=None, array_types=(), - precomp_layout=None, verbosity=0, outcome_count_by_circuit=None): + precomp_layout=None, outcome_count_by_circuit=None, verbosity=0): self.dataset = dataset self.model = model self.resource_alloc = _ResourceAllocation.cast(resource_alloc) @@ -876,9 +876,9 @@ def __init__(self, model, dataset, circuits=None, resource_alloc=None, array_typ #self.circuits self.split_circuit_cache = self.layout.split_circuit_cache self.split_circuits = [self.split_circuit_cache[ckt] for ckt in self.circuits] + #currently only implemented for matrix, will eventually add map support. else: - self.split_circuits = None self.split_circuit_cache = None #set the value of the circuit outcome count cache (can be None) @@ -938,33 +938,19 @@ def add_omitted_freqs(self, printer=None, force=False): """ if self.firsts is None or force: # FUTURE: add any tracked memory? self.resource_alloc.add_tracked_memory(...) - self.firsts = [] - self.indicesOfCircuitsWithOmittedData = [] - - if self.outcome_count_by_circuit_cache is None: - #bulk compute the number of outcomes. - if isinstance(self.model, _OpModel) and self.split_circuits is not None: - bulk_outcomes_list = self.model.bulk_circuit_outcomes(self.circuits, split_circuits=self.split_circuits) - num_outcomes_list = [len(outcome_tup) for outcome_tup in bulk_outcomes_list] - else: - num_outcomes_list = [self.model.compute_num_outcomes(c) for c in self.circuits] + self.firsts = []; self.indicesOfCircuitsWithOmittedData = [] + + #bulk compute the number of outcomes. + if isinstance(self.model, _OpModel): + bulk_outcomes_list = self.model.bulk_circuit_outcomes(self.circuits, split_circuits=self.split_circuits) + num_outcomes_list = [len(outcome_tup) for outcome_tup in bulk_outcomes_list] else: - num_outcomes_list = [] - for ckt in self.circuits: - num_outcomes = self.outcome_count_by_circuit_cache.get(ckt, None) - if num_outcomes is None: - num_outcomes = self.model.compute_num_outcomes(ckt) - #also add this to the cache, just in case it is later needed. - self.outcome_count_by_circuit_cache[ckt] = num_outcomes - num_outcomes_list.append(num_outcomes) + num_outcomes_list = [self.model.compute_num_outcomes(c) for c in self.circuits] for i in range(len(self.circuits)): - indices = self.layout.indices_for_index(i) - #The return types of indices_for_index are either ndarrays - #or slices. 
- if isinstance(indices, slice): - indices = _slct.indices(indices) - if 0 < len(indices) < num_outcomes_list[i]: + indices = _slct.to_array(self.layout.indices_for_index(i)) + lklen = _slct.length(self.layout.indices_for_index(i)) + if 0 < lklen < num_outcomes_list[i]: self.firsts.append(indices[0]) self.indicesOfCircuitsWithOmittedData.append(i) if self.firsts: From 9df2a61bd6c308c84edaa3e205be1e4e1583eea7 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sun, 2 Jun 2024 18:11:48 -0600 Subject: [PATCH 12/41] Tweak omitted freqs and counts + DataSet and slicetools Performance optimization for the method for adding omitted frequencies to incorporate caching of the number of outcomes per circuit (which is somewhat expensive since it goes through the instrument/povm expansion code). Additionally refactor some other parts of this code for improved efficiency. Also makes a few minor tweaks to the method for adding counts to speed that up as well. Can probably make this a bit faster still by merging the two calls to reduce redundancy, but that is a future us problem. Additionally make a few microoptimizations to the dataset code for grabbing counts, and to slicetools adding a function for directly giving a numpy array for a slice (instead of needing to cast from a list). Miscellaneous cleanup of old commented out code that doesn't appear needed any longer. --- pygsti/algorithms/core.py | 2 +- pygsti/data/dataset.py | 3 ++- pygsti/objectivefns/objectivefns.py | 36 ++++++++++++++++++++--------- 3 files changed, 28 insertions(+), 13 deletions(-) diff --git a/pygsti/algorithms/core.py b/pygsti/algorithms/core.py index fc367dfc1..62d84e08e 100644 --- a/pygsti/algorithms/core.py +++ b/pygsti/algorithms/core.py @@ -895,7 +895,7 @@ def _max_array_types(artypes_list): # get the maximum number of each array type if isinstance(mdl, _models.model.OpModel): if precomp_layout_circuit_cache is not None: #then grab the split circuits from there. expanded_circuit_outcome_list = mdl.bulk_expand_instruments_and_separate_povm(unique_circuits, - split_circuits = precomp_layout_circuit_cache['split_circuits']) + split_circuits = precomp_layout_circuit_cache['split_circuits'].values()) outcome_count_by_circuit_cache = {ckt: len(outcome_tup) for ckt,outcome_tup in zip(unique_circuits, expanded_circuit_outcome_list)} else: expanded_circuit_outcome_list = mdl.bulk_expand_instruments_and_separate_povm(unique_circuits) diff --git a/pygsti/data/dataset.py b/pygsti/data/dataset.py index ce7bb52c6..091339f91 100644 --- a/pygsti/data/dataset.py +++ b/pygsti/data/dataset.py @@ -563,6 +563,7 @@ def _get_counts(self, timestamp=None, all_outcomes=False): else: tslc = slice(None) oli_tslc = self.oli[tslc] + rep_tslc = self.reps[tslc] nOutcomes = len(self.dataset.olIndex) nIndices = len(oli_tslc) @@ -576,7 +577,7 @@ def _get_counts(self, timestamp=None, all_outcomes=False): for ol, i in self.dataset.olIndex.items(): inds = oli_tslc[oli_tslc == i] if len(inds) > 0 or all_outcomes: - cntDict.setitem_unsafe(ol, float(sum(self.reps[tslc][inds]))) + cntDict.setitem_unsafe(ol, float(sum(rep_tslc[inds]))) else: if self.reps is None: for ol_index in oli_tslc: diff --git a/pygsti/objectivefns/objectivefns.py b/pygsti/objectivefns/objectivefns.py index e07e702dd..0b7af6497 100644 --- a/pygsti/objectivefns/objectivefns.py +++ b/pygsti/objectivefns/objectivefns.py @@ -842,7 +842,7 @@ class ModelDatasetCircuitsStore(object): point. 
""" def __init__(self, model, dataset, circuits=None, resource_alloc=None, array_types=(), - precomp_layout=None, outcome_count_by_circuit=None, verbosity=0): + precomp_layout=None, verbosity=0, outcome_count_by_circuit=None): self.dataset = dataset self.model = model self.resource_alloc = _ResourceAllocation.cast(resource_alloc) @@ -938,19 +938,33 @@ def add_omitted_freqs(self, printer=None, force=False): """ if self.firsts is None or force: # FUTURE: add any tracked memory? self.resource_alloc.add_tracked_memory(...) - self.firsts = []; self.indicesOfCircuitsWithOmittedData = [] - - #bulk compute the number of outcomes. - if isinstance(self.model, _OpModel): - bulk_outcomes_list = self.model.bulk_circuit_outcomes(self.circuits, split_circuits=self.split_circuits) - num_outcomes_list = [len(outcome_tup) for outcome_tup in bulk_outcomes_list] + self.firsts = [] + self.indicesOfCircuitsWithOmittedData = [] + + if self.outcome_count_by_circuit_cache is None: + #bulk compute the number of outcomes. + if isinstance(self.model, _OpModel) and self.split_circuits is not None: + bulk_outcomes_list = self.model.bulk_circuit_outcomes(self.circuits, split_circuits=self.split_circuits) + num_outcomes_list = [len(outcome_tup) for outcome_tup in bulk_outcomes_list] + else: + num_outcomes_list = [self.model.compute_num_outcomes(c) for c in self.circuits] else: - num_outcomes_list = [self.model.compute_num_outcomes(c) for c in self.circuits] + num_outcomes_list = [] + for ckt in self.circuits: + num_outcomes = self.outcome_count_by_circuit_cache.get(ckt, None) + if num_outcomes is None: + num_outcomes = self.model.compute_num_outcomes(ckt) + #also add this to the cache, just in case it is later needed. + self.outcome_count_by_circuit_cache[ckt] = num_outcomes + num_outcomes_list.append(num_outcomes) for i in range(len(self.circuits)): - indices = _slct.to_array(self.layout.indices_for_index(i)) - lklen = _slct.length(self.layout.indices_for_index(i)) - if 0 < lklen < num_outcomes_list[i]: + indices = self.layout.indices_for_index(i) + #The return types of indices_for_index are either ndarrays + #or slices. + if isinstance(indices, slice): + indices = _slct.indices(indices) + if 0 < len(indices) < num_outcomes_list[i]: self.firsts.append(indices[0]) self.indicesOfCircuitsWithOmittedData.append(i) if self.firsts: From 8d5cfc7e33e0548b9d2c3aa3af536432b7bb6a51 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sun, 2 Jun 2024 18:23:33 -0600 Subject: [PATCH 13/41] Fix dataset bug Fix a bug I introduced in dataset indexing into something that could be None. 
--- pygsti/data/dataset.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pygsti/data/dataset.py b/pygsti/data/dataset.py index 091339f91..ce7bb52c6 100644 --- a/pygsti/data/dataset.py +++ b/pygsti/data/dataset.py @@ -563,7 +563,6 @@ def _get_counts(self, timestamp=None, all_outcomes=False): else: tslc = slice(None) oli_tslc = self.oli[tslc] - rep_tslc = self.reps[tslc] nOutcomes = len(self.dataset.olIndex) nIndices = len(oli_tslc) @@ -577,7 +576,7 @@ def _get_counts(self, timestamp=None, all_outcomes=False): for ol, i in self.dataset.olIndex.items(): inds = oli_tslc[oli_tslc == i] if len(inds) > 0 or all_outcomes: - cntDict.setitem_unsafe(ol, float(sum(rep_tslc[inds]))) + cntDict.setitem_unsafe(ol, float(sum(self.reps[tslc][inds]))) else: if self.reps is None: for ol_index in oli_tslc: From cd653b397ff2eb6c26c6da019aa5ecf253f2918a Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sun, 2 Jun 2024 18:49:01 -0600 Subject: [PATCH 14/41] Another minor bugfix caught by testing Another minor bug caught by testing. --- pygsti/objectivefns/objectivefns.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pygsti/objectivefns/objectivefns.py b/pygsti/objectivefns/objectivefns.py index 0b7af6497..f2a19b9f6 100644 --- a/pygsti/objectivefns/objectivefns.py +++ b/pygsti/objectivefns/objectivefns.py @@ -876,9 +876,9 @@ def __init__(self, model, dataset, circuits=None, resource_alloc=None, array_typ #self.circuits self.split_circuit_cache = self.layout.split_circuit_cache self.split_circuits = [self.split_circuit_cache[ckt] for ckt in self.circuits] - #currently only implemented for matrix, will eventually add map support. else: + self.split_circuits = None self.split_circuit_cache = None #set the value of the circuit outcome count cache (can be None) From fd1e8d17ce99ec497ceed947183406a4080ceb93 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sun, 2 Jun 2024 20:58:11 -0600 Subject: [PATCH 15/41] Another minor bugfix caught by testing --- pygsti/algorithms/core.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pygsti/algorithms/core.py b/pygsti/algorithms/core.py index 62d84e08e..c36e50689 100644 --- a/pygsti/algorithms/core.py +++ b/pygsti/algorithms/core.py @@ -888,8 +888,11 @@ def _max_array_types(artypes_list): # get the maximum number of each array type for i, circuit_list in enumerate(circuit_lists): printer.log(f'Layout for iteration {i}', 2) - precomp_layouts.append(mdl.sim.create_layout(circuit_list, dataset, resource_alloc, array_types, verbosity= printer - 1, - layout_creation_circuit_cache = precomp_layout_circuit_cache)) + if isinstance(mdl.sim, _fwdsims.MatrixForwardSimulator): + precomp_layouts.append(mdl.sim.create_layout(circuit_list, dataset, resource_alloc, array_types, verbosity= printer - 1, + layout_creation_circuit_cache = precomp_layout_circuit_cache)) + else: + precomp_layouts.append(mdl.sim.create_layout(circuit_list, dataset, resource_alloc, array_types, verbosity= printer - 1)) #precompute a cache of possible outcome counts for each circuits to accelerate MDC store creation if isinstance(mdl, _models.model.OpModel): From 1ee61120d5a66cada50a500e778306cd8c4e4199 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Tue, 4 Jun 2024 22:41:54 -0600 Subject: [PATCH 16/41] Faster implementation of __getitem__ Improve the performance of __getitem__ when indexing into static circuits by making use of the _copy_init code path. 
--- pygsti/circuits/circuit.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/pygsti/circuits/circuit.py b/pygsti/circuits/circuit.py index 3af80ec39..1fba79e74 100644 --- a/pygsti/circuits/circuit.py +++ b/pygsti/circuits/circuit.py @@ -1193,29 +1193,26 @@ def extract_labels(self, layers=None, lines=None, strict=True): Note that the returned circuit doesn't retain any original metadata, such as the compilable layer indices or occurence id. """ - nonint_layers = not isinstance(layers, int) #Shortcut for common case when lines == None and when we're only taking a layer slice/index if lines is None and layers is not None: if self._static: - if not nonint_layers: + if isinstance(layers, int): return self._labels[layers] if isinstance(layers, slice) and strict is True: # if strict=False, then need to recompute line labels #can speed this up a measurably by manually computing the new hashable tuple value and hash - if not self._line_labels in (('*',), ()): - new_hashable_tup = self._labels[layers] + ('@',) + self._line_labels - else: - new_hashable_tup = self._labels[layers] + new_hashable_tup = self._labels[layers] + ('@',) + self._line_labels ret = Circuit.__new__(Circuit) - return ret._copy_init(self._labels[layers], self._line_labels, not self._static, hashable_tup= new_hashable_tup, precomp_hash=hash(new_hashable_tup)) + return ret._copy_init(self._labels[layers], self._line_labels, not self._static, + hashable_tup= new_hashable_tup, + precomp_hash=hash(new_hashable_tup)) else: - if not nonint_layers: + if isinstance(layers, int): return self.layertup[layers] if isinstance(layers, slice) and strict is True: # if strict=False, then need to recompute line labels return Circuit._fastinit(self._labels[layers], self._line_labels, not self._static) #otherwise assert both are not None: - layers = self._proc_layers_arg(layers) lines = self._proc_lines_arg(lines) if len(layers) == 0 or len(lines) == 0: @@ -1248,7 +1245,7 @@ def get_sslbls(lbl): return lbl.sslbls ret_layer.append(l) ret.append(_Label(ret_layer) if len(ret_layer) != 1 else ret_layer[0]) # Labels b/c we use _fastinit - if nonint_layers: + if not isinstance(layers, int): if not strict: lines = "auto" # since we may have included lbls on other lines # don't worry about string rep for now... From 1c83f9b9d5334f39ce67e92f765904c0d56f0f2e Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Tue, 4 Jun 2024 22:44:17 -0600 Subject: [PATCH 17/41] Implement caching for map layout creation Implement caching of circuit structures tailored to the map forward simulator's requirements. --- pygsti/algorithms/core.py | 10 ++-- pygsti/forwardsims/mapforwardsim.py | 3 +- pygsti/forwardsims/matrixforwardsim.py | 2 + pygsti/layouts/maplayout.py | 68 ++++++++++++++++++++------ pygsti/layouts/matrixlayout.py | 4 +- 5 files changed, 64 insertions(+), 23 deletions(-) diff --git a/pygsti/algorithms/core.py b/pygsti/algorithms/core.py index c36e50689..91617823f 100644 --- a/pygsti/algorithms/core.py +++ b/pygsti/algorithms/core.py @@ -880,15 +880,17 @@ def _max_array_types(artypes_list): # get the maximum number of each array type precomp_layouts = [] #pre-compute a dictionary caching completed circuits for layout construction performance. 
- unique_circuits = {ckt for circuit_list in circuit_lists for ckt in circuit_list} + unique_circuits = list({ckt for circuit_list in circuit_lists for ckt in circuit_list}) if isinstance(mdl.sim, _fwdsims.MatrixForwardSimulator): - precomp_layout_circuit_cache = _layouts.matrixlayout.create_matrix_copa_layout_circuit_cache(unique_circuits, mdl) + precomp_layout_circuit_cache = _layouts.matrixlayout.create_matrix_copa_layout_circuit_cache(unique_circuits, mdl, dataset=dataset) + elif isinstance(mdl.sim, _fwdsims.MapForwardSimulator): + precomp_layout_circuit_cache = _layouts.maplayout.create_map_copa_layout_circuit_cache(unique_circuits, mdl, dataset=dataset) else: precomp_layout_circuit_cache = None for i, circuit_list in enumerate(circuit_lists): printer.log(f'Layout for iteration {i}', 2) - if isinstance(mdl.sim, _fwdsims.MatrixForwardSimulator): + if isinstance(mdl.sim, (_fwdsims.MatrixForwardSimulator, _fwdsims.MapForwardSimulator)): precomp_layouts.append(mdl.sim.create_layout(circuit_list, dataset, resource_alloc, array_types, verbosity= printer - 1, layout_creation_circuit_cache = precomp_layout_circuit_cache)) else: @@ -898,7 +900,7 @@ def _max_array_types(artypes_list): # get the maximum number of each array type if isinstance(mdl, _models.model.OpModel): if precomp_layout_circuit_cache is not None: #then grab the split circuits from there. expanded_circuit_outcome_list = mdl.bulk_expand_instruments_and_separate_povm(unique_circuits, - split_circuits = precomp_layout_circuit_cache['split_circuits'].values()) + completed_circuits= precomp_layout_circuit_cache['completed_circuits'].values()) outcome_count_by_circuit_cache = {ckt: len(outcome_tup) for ckt,outcome_tup in zip(unique_circuits, expanded_circuit_outcome_list)} else: expanded_circuit_outcome_list = mdl.bulk_expand_instruments_and_separate_povm(unique_circuits) diff --git a/pygsti/forwardsims/mapforwardsim.py b/pygsti/forwardsims/mapforwardsim.py index 501c1f855..3cba839bc 100644 --- a/pygsti/forwardsims/mapforwardsim.py +++ b/pygsti/forwardsims/mapforwardsim.py @@ -226,8 +226,7 @@ def create_layout(self, circuits, dataset=None, resource_alloc=None, array_types Determines how much output to send to stdout. 0 means no output, higher integers mean more output. - layout_creation_circuit_cache: - A precomputed dictionary serving as a cache for completed + A precomputed dictionary serving as a cache for completed circuits. I.e. circuits with prep labels and POVM labels appended. Along with other useful pre-computed circuit structures used in layout creation. diff --git a/pygsti/forwardsims/matrixforwardsim.py b/pygsti/forwardsims/matrixforwardsim.py index 2a50f62ef..2952ddef0 100644 --- a/pygsti/forwardsims/matrixforwardsim.py +++ b/pygsti/forwardsims/matrixforwardsim.py @@ -1061,6 +1061,8 @@ def create_layout(self, circuits, dataset=None, resource_alloc=None, array_types layout_creation_circuit_cache : dict, optional (default None) A precomputed dictionary serving as a cache for completed circuits. I.e. circuits with prep labels and POVM labels appended. + Along with other useful pre-computed circuit structures used in layout + creation. 
Returns ------- diff --git a/pygsti/layouts/maplayout.py b/pygsti/layouts/maplayout.py index b2142a5e0..d0fb7a34d 100644 --- a/pygsti/layouts/maplayout.py +++ b/pygsti/layouts/maplayout.py @@ -53,17 +53,18 @@ class _MapCOPALayoutAtom(_DistributableAtom): def __init__(self, unique_complete_circuits, ds_circuits, group, model, dataset, max_cache_size, expanded_complete_circuit_cache = None): - expanded_circuit_info_by_unique = dict() - expanded_circuit_set = dict() # only use SeparatePOVMCircuit keys as ordered set - - if expanded_complete_circuit_cache is None: - expanded_complete_circuit_cache = dict() + expanded_circuit_info_by_unique = _collections.OrderedDict() + expanded_circuit_set = _collections.OrderedDict() # only use SeparatePOVMCircuit keys as ordered set for i in group: - d = expanded_complete_circuit_cache.get(unique_complete_circuits[i], None) - if d is None: - unique_observed_outcomes = None if (dataset is None) else dataset[ds_circuits[i]].unique_outcomes - d = model.expand_instruments_and_separate_povm(unique_complete_circuits[i], unique_observed_outcomes) + if expanded_complete_circuit_cache is None: + observed_outcomes = None if (dataset is None) else dataset[ds_circuits[i]].outcomes + d = model.expand_instruments_and_separate_povm(unique_complete_circuits[i], observed_outcomes) + else: + d = expanded_complete_circuit_cache.get(unique_complete_circuits[i], None) + if d is None: + observed_outcomes = None if (dataset is None) else dataset[ds_circuits[i]].outcomes + d = model.expand_instruments_and_separate_povm(unique_complete_circuits[i], observed_outcomes) expanded_circuit_info_by_unique[i] = d # a dict of SeparatePOVMCircuits => tuples of outcome labels expanded_circuit_set.update(d) @@ -219,11 +220,14 @@ def __init__(self, circuits, model, dataset=None, max_cache_size=None, ds_circuits = _lt.apply_aliases_to_circuits(unique_circuits, aliases) #extract subcaches from layout_creation_circuit_cache: - if layout_creation_circuit_cache is None: - layout_creation_circuit_cache = dict() - self.completed_circuit_cache = layout_creation_circuit_cache.get('completed_circuits', None) - self.split_circuit_cache = layout_creation_circuit_cache.get('split_circuits', None) - self.expanded_and_separated_circuits_cache = layout_creation_circuit_cache.get('expanded_and_separated_circuits', None) + if layout_creation_circuit_cache is not None: + self.completed_circuit_cache = layout_creation_circuit_cache.get('completed_circuits', None) + self.split_circuit_cache = layout_creation_circuit_cache.get('split_circuits', None) + self.expanded_and_separated_circuits_cache = layout_creation_circuit_cache.get('expanded_and_separated_circuits', None) + else: + self.completed_circuit_cache = None + self.split_circuit_cache = None + self.expanded_and_separated_circuits_cache = None if self.completed_circuit_cache is None: unique_complete_circuits = model.complete_circuits(unique_circuits) @@ -272,4 +276,38 @@ def _create_atom(group): unique_to_orig = {unique_i: orig_i for orig_i, unique_i in self._to_unique.items()} # unique => orig. 
indices for atom in self.atoms: for expanded_circuit_i, unique_i in atom.unique_indices_by_expcircuit.items(): - atom.orig_indices_by_expcircuit[expanded_circuit_i] = unique_to_orig[unique_i] \ No newline at end of file + atom.orig_indices_by_expcircuit[expanded_circuit_i] = unique_to_orig[unique_i] + + +def create_map_copa_layout_circuit_cache(circuits, model, dataset=None): + """ + Helper function for pre-computing/pre-processing circuits structures + used in matrix layout creation. + """ + cache = dict() + completed_circuits = model.complete_circuits(circuits) + + cache['completed_circuits'] = {ckt: comp_ckt for ckt, comp_ckt in zip(circuits, completed_circuits)} + + split_circuits = model.split_circuits(completed_circuits, split_prep=False) + cache['split_circuits'] = {ckt: split_ckt for ckt, split_ckt in zip(circuits, split_circuits)} + + + if dataset is not None: + outcomes_list = [] + for ckt in circuits: + ds_row = dataset[ckt] + outcomes_list.append(ds_row.outcomes if ds_row is not None else None) + #slightly different than matrix, for some reason outcomes is used in this class + #and unique_outcomes is used in matrix. + else: + outcomes_list = [None]*len(circuits) + + expanded_circuit_outcome_list = model.bulk_expand_instruments_and_separate_povm(circuits, + observed_outcomes_list = outcomes_list, + completed_circuits= completed_circuits) + + expanded_circuit_cache = {ckt: expanded_ckt for ckt,expanded_ckt in zip(completed_circuits, expanded_circuit_outcome_list)} + cache['expanded_and_separated_circuits'] = expanded_circuit_cache + + return cache diff --git a/pygsti/layouts/matrixlayout.py b/pygsti/layouts/matrixlayout.py index fadc472b8..09239f239 100644 --- a/pygsti/layouts/matrixlayout.py +++ b/pygsti/layouts/matrixlayout.py @@ -403,8 +403,8 @@ def create_matrix_copa_layout_circuit_cache(circuits, model, dataset=None): #doing here, but I think 90+% of the time this ought to be fine. if dataset is not None: unique_outcomes_list = [] - for ckt in completed_circuits.values(): - ds_row = dataset.get(ckt, None) + for ckt in circuits: + ds_row = dataset[ckt] unique_outcomes_list.append(ds_row.unique_outcomes if ds_row is not None else None) else: unique_outcomes_list = [None]*len(circuits) From 7a386891561692eee28daa782d9e40c17225bfd1 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Tue, 4 Jun 2024 23:21:33 -0600 Subject: [PATCH 18/41] Fix bugs in new extract_labels implementation --- pygsti/circuits/circuit.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/pygsti/circuits/circuit.py b/pygsti/circuits/circuit.py index 1fba79e74..3af80ec39 100644 --- a/pygsti/circuits/circuit.py +++ b/pygsti/circuits/circuit.py @@ -1193,26 +1193,29 @@ def extract_labels(self, layers=None, lines=None, strict=True): Note that the returned circuit doesn't retain any original metadata, such as the compilable layer indices or occurence id. 
""" + nonint_layers = not isinstance(layers, int) #Shortcut for common case when lines == None and when we're only taking a layer slice/index if lines is None and layers is not None: if self._static: - if isinstance(layers, int): + if not nonint_layers: return self._labels[layers] if isinstance(layers, slice) and strict is True: # if strict=False, then need to recompute line labels #can speed this up a measurably by manually computing the new hashable tuple value and hash - new_hashable_tup = self._labels[layers] + ('@',) + self._line_labels + if not self._line_labels in (('*',), ()): + new_hashable_tup = self._labels[layers] + ('@',) + self._line_labels + else: + new_hashable_tup = self._labels[layers] ret = Circuit.__new__(Circuit) - return ret._copy_init(self._labels[layers], self._line_labels, not self._static, - hashable_tup= new_hashable_tup, - precomp_hash=hash(new_hashable_tup)) + return ret._copy_init(self._labels[layers], self._line_labels, not self._static, hashable_tup= new_hashable_tup, precomp_hash=hash(new_hashable_tup)) else: - if isinstance(layers, int): + if not nonint_layers: return self.layertup[layers] if isinstance(layers, slice) and strict is True: # if strict=False, then need to recompute line labels return Circuit._fastinit(self._labels[layers], self._line_labels, not self._static) #otherwise assert both are not None: + layers = self._proc_layers_arg(layers) lines = self._proc_lines_arg(lines) if len(layers) == 0 or len(lines) == 0: @@ -1245,7 +1248,7 @@ def get_sslbls(lbl): return lbl.sslbls ret_layer.append(l) ret.append(_Label(ret_layer) if len(ret_layer) != 1 else ret_layer[0]) # Labels b/c we use _fastinit - if not isinstance(layers, int): + if nonint_layers: if not strict: lines = "auto" # since we may have included lbls on other lines # don't worry about string rep for now... From 070c2f4017a23fb485e06709c08f09949d0b44e5 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Tue, 4 Jun 2024 23:54:40 -0600 Subject: [PATCH 19/41] Finish refactoring expand_instruments_and_separate_povm This finishes the process of refactoring expand_instruments_and_separate_povm from a circuit method to a method of OpModel. --- pygsti/circuits/circuit.py | 100 ------------------------------------- 1 file changed, 100 deletions(-) diff --git a/pygsti/circuits/circuit.py b/pygsti/circuits/circuit.py index 3af80ec39..822fd0b65 100644 --- a/pygsti/circuits/circuit.py +++ b/pygsti/circuits/circuit.py @@ -4443,106 +4443,6 @@ def done_editing(self): self._hashable_tup = self.tup self._hash = hash(self._hashable_tup) - def expand_instruments_and_separate_povm(self, model, observed_outcomes=None): - """ - Creates a dictionary of :class:`SeparatePOVMCircuit` objects from expanding the instruments of this circuit. - - Each key of the returned dictionary replaces the instruments in this circuit with a selection - of their members. (The size of the resulting dictionary is the product of the sizes of - each instrument appearing in this circuit when `observed_outcomes is None`). Keys are stored - as :class:`SeparatePOVMCircuit` objects so it's easy to keep track of which POVM outcomes (effects) - correspond to observed data. This function is, for the most part, used internally to process - a circuit before computing its outcome probabilities. 
- - Parameters - ---------- - model : Model - The model used to provide necessary details regarding the expansion, including: - - - default SPAM layers - - definitions of instrument-containing layers - - expansions of individual instruments and POVMs - - Returns - ------- - OrderedDict - A dict whose keys are :class:`SeparatePOVMCircuit` objects and whose - values are tuples of the outcome labels corresponding to this circuit, - one per POVM effect held in the key. - """ - complete_circuit = model.complete_circuit(self) - expanded_circuit_outcomes = _collections.OrderedDict() - povm_lbl = complete_circuit[-1] # "complete" circuits always end with a POVM label - circuit_without_povm = complete_circuit[0:len(complete_circuit) - 1] - - def create_tree(lst): - subs = _collections.OrderedDict() - for el in lst: - if len(el) > 0: - if el[0] not in subs: subs[el[0]] = [] - subs[el[0]].append(el[1:]) - return _collections.OrderedDict([(k, create_tree(sub_lst)) for k, sub_lst in subs.items()]) - - def add_expanded_circuit_outcomes(circuit, running_outcomes, ootree, start): - """ - """ - cir = circuit if start == 0 else circuit[start:] # for performance, avoid uneeded slicing - for k, layer_label in enumerate(cir, start=start): - components = layer_label.components - #instrument_inds = _np.nonzero([model._is_primitive_instrument_layer_lbl(component) - # for component in components])[0] # SLOWER than statement below - instrument_inds = _np.array([i for i, component in enumerate(components) - if model._is_primitive_instrument_layer_lbl(component)]) - if instrument_inds.size > 0: - # This layer contains at least one instrument => recurse with instrument(s) replaced with - # all combinations of their members. - component_lookup = {i: comp for i, comp in enumerate(components)} - instrument_members = [model._member_labels_for_instrument(components[i]) - for i in instrument_inds] # also components of outcome labels - for selected_instrmt_members in _itertools.product(*instrument_members): - expanded_layer_lbl = component_lookup.copy() - expanded_layer_lbl.update({i: components[i] + "_" + sel - for i, sel in zip(instrument_inds, selected_instrmt_members)}) - expanded_layer_lbl = _Label([expanded_layer_lbl[i] for i in range(len(components))]) - - if ootree is not None: - new_ootree = ootree - for sel in selected_instrmt_members: - new_ootree = new_ootree.get(sel, {}) - if len(new_ootree) == 0: continue # no observed outcomes along this outcome-tree path - else: - new_ootree = None - - add_expanded_circuit_outcomes(circuit[0:k] + Circuit((expanded_layer_lbl,)) + circuit[k + 1:], - running_outcomes + selected_instrmt_members, new_ootree, k + 1) - break - - else: # no more instruments to process: `cir` contains no instruments => add an expanded circuit - assert(circuit not in expanded_circuit_outcomes) # shouldn't be possible to generate duplicates... - elabels = model._effect_labels_for_povm(povm_lbl) if (observed_outcomes is None) \ - else tuple(ootree.keys()) - outcomes = tuple((running_outcomes + (elabel,) for elabel in elabels)) - expanded_circuit_outcomes[SeparatePOVMCircuit(circuit, povm_lbl, elabels)] = outcomes - - ootree = create_tree(observed_outcomes) if observed_outcomes is not None else None # tree of observed outcomes - # e.g. 
[('0','00'), ('0','01'), ('1','10')] ==> {'0': {'00': {}, '01': {}}, '1': {'10': {}}} - - if model._has_instruments(): - add_expanded_circuit_outcomes(circuit_without_povm, (), ootree, start=0) - else: - # It may be helpful to cache the set of elabels for a POVM (maybe within the model?) because - # currently the call to _effect_labels_for_povm may be a bottleneck. It's needed, even when we have - # observed outcomes, because there may be some observed outcomes that aren't modeled (e.g. leakage states) - if observed_outcomes is None: - elabels = model._effect_labels_for_povm(povm_lbl) - else: - possible_lbls = set(model._effect_labels_for_povm(povm_lbl)) - elabels = tuple([oo for oo in ootree.keys() if oo in possible_lbls]) - outcomes = tuple(((elabel,) for elabel in elabels)) - expanded_circuit_outcomes[SeparatePOVMCircuit(circuit_without_povm, povm_lbl, elabels)] = outcomes - - return expanded_circuit_outcomes - class CompressedCircuit(object): """ A "compressed" Circuit that requires less disk space. From 8f80e31b9657285976415ee06c9fa50683cf9cfe Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Wed, 5 Jun 2024 00:33:40 -0600 Subject: [PATCH 20/41] Refactor expand_instruments_and_separate_povm Refactor expand_instruments_and_separate_povm to use the multi-circuit version under the hood to reduce code duplication. --- pygsti/models/model.py | 74 ++---------------------------------------- 1 file changed, 2 insertions(+), 72 deletions(-) diff --git a/pygsti/models/model.py b/pygsti/models/model.py index 2ebdc69ba..4f0c5bd9c 100644 --- a/pygsti/models/model.py +++ b/pygsti/models/model.py @@ -1539,78 +1539,8 @@ def expand_instruments_and_separate_povm(self, circuit, observed_outcomes=None): values are tuples of the outcome labels corresponding to this circuit, one per POVM effect held in the key. """ - complete_circuit = self.complete_circuit(circuit) - expanded_circuit_outcomes = _collections.OrderedDict() - povm_lbl = complete_circuit[-1] # "complete" circuits always end with a POVM label - circuit_without_povm = complete_circuit[0:len(complete_circuit) - 1] - - def create_tree(lst): - subs = _collections.OrderedDict() - for el in lst: - if len(el) > 0: - if el[0] not in subs: subs[el[0]] = [] - subs[el[0]].append(el[1:]) - return _collections.OrderedDict([(k, create_tree(sub_lst)) for k, sub_lst in subs.items()]) - - def add_expanded_circuit_outcomes(circuit, running_outcomes, ootree, start): - """ - """ - cir = circuit if start == 0 else circuit[start:] # for performance, avoid uneeded slicing - for k, layer_label in enumerate(cir, start=start): - components = layer_label.components - #instrument_inds = _np.nonzero([model._is_primitive_instrument_layer_lbl(component) - # for component in components])[0] # SLOWER than statement below - instrument_inds = _np.array([i for i, component in enumerate(components) - if self._is_primitive_instrument_layer_lbl(component)]) - if instrument_inds.size > 0: - # This layer contains at least one instrument => recurse with instrument(s) replaced with - # all combinations of their members. 
- component_lookup = {i: comp for i, comp in enumerate(components)} - instrument_members = [self._member_labels_for_instrument(components[i]) - for i in instrument_inds] # also components of outcome labels - for selected_instrmt_members in _itertools.product(*instrument_members): - expanded_layer_lbl = component_lookup.copy() - expanded_layer_lbl.update({i: components[i] + "_" + sel - for i, sel in zip(instrument_inds, selected_instrmt_members)}) - expanded_layer_lbl = _Label([expanded_layer_lbl[i] for i in range(len(components))]) - - if ootree is not None: - new_ootree = ootree - for sel in selected_instrmt_members: - new_ootree = new_ootree.get(sel, {}) - if len(new_ootree) == 0: continue # no observed outcomes along this outcome-tree path - else: - new_ootree = None - - add_expanded_circuit_outcomes(circuit[0:k] + _Circuit((expanded_layer_lbl,)) + circuit[k + 1:], - running_outcomes + selected_instrmt_members, new_ootree, k + 1) - break - - else: # no more instruments to process: `cir` contains no instruments => add an expanded circuit - assert(circuit not in expanded_circuit_outcomes) # shouldn't be possible to generate duplicates... - elabels = self._effect_labels_for_povm(povm_lbl) if (observed_outcomes is None) \ - else tuple(ootree.keys()) - outcomes = tuple((running_outcomes + (elabel,) for elabel in elabels)) - expanded_circuit_outcomes[_SeparatePOVMCircuit(circuit, povm_lbl, elabels)] = outcomes - - ootree = create_tree(observed_outcomes) if observed_outcomes is not None else None # tree of observed outcomes - # e.g. [('0','00'), ('0','01'), ('1','10')] ==> {'0': {'00': {}, '01': {}}, '1': {'10': {}}} - - if self._has_instruments(): - add_expanded_circuit_outcomes(circuit_without_povm, (), ootree, start=0) - else: - # It may be helpful to cache the set of elabels for a POVM (maybe within the model?) because - # currently the call to _effect_labels_for_povm may be a bottleneck. It's needed, even when we have - # observed outcomes, because there may be some observed outcomes that aren't modeled (e.g. leakage states) - if observed_outcomes is None: - elabels = self._effect_labels_for_povm(povm_lbl) - else: - possible_lbls = set(self._effect_labels_for_povm(povm_lbl)) - elabels = tuple([oo for oo in ootree.keys() if oo in possible_lbls]) - outcomes = tuple(((elabel,) for elabel in elabels)) - expanded_circuit_outcomes[_SeparatePOVMCircuit(circuit_without_povm, povm_lbl, elabels)] = outcomes - - return expanded_circuit_outcomes + expanded_circuit_outcomes = self.bulk_expand_instruments_and_separate_povm([circuit], [observed_outcomes]) + return expanded_circuit_outcomes[0] def bulk_expand_instruments_and_separate_povm(self, circuits, observed_outcomes_list=None, split_circuits = None, completed_circuits = None): From 007c44995b52e6f6630692edf07bd2e4f5a91722 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Wed, 5 Jun 2024 16:46:52 -0600 Subject: [PATCH 21/41] Refactor cache creation functions Refactor cache creation functions into static methods of the corresponding forward simulator class. Also add an empty base version of this method, and clean up a few miscellaneous things caught by review. 
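For context, a hedged sketch of the intended calling pattern after this refactor (it mirrors the updated call sites in core.py; `mdl`, `dataset`, `circuit_lists`, `resource_alloc`, `array_types` and the `_fwdsims` import alias are assumed to already be defined as in the surrounding driver code):

    # The cache-construction helper now lives on the forward simulator class as a
    # static method, so callers no longer need to dispatch on the layout module.
    unique_circuits = list({ckt for circuit_list in circuit_lists for ckt in circuit_list})

    if isinstance(mdl.sim, (_fwdsims.MatrixForwardSimulator, _fwdsims.MapForwardSimulator)):
        cache = mdl.sim.create_copa_layout_circuit_cache(unique_circuits, mdl, dataset=dataset)
    else:
        cache = None  # simulators without a specialized helper get no precomputed cache

    layout = mdl.sim.create_layout(unique_circuits, dataset, resource_alloc, array_types,
                                   verbosity=0, layout_creation_circuit_cache=cache)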
--- pygsti/algorithms/core.py | 21 +++++--------- pygsti/forwardsims/mapforwardsim.py | 14 ++++----- pygsti/forwardsims/matrixforwardsim.py | 6 ++-- pygsti/layouts/maplayout.py | 34 ---------------------- pygsti/layouts/matrixlayout.py | 39 -------------------------- 5 files changed, 17 insertions(+), 97 deletions(-) diff --git a/pygsti/algorithms/core.py b/pygsti/algorithms/core.py index 91617823f..61c3a2185 100644 --- a/pygsti/algorithms/core.py +++ b/pygsti/algorithms/core.py @@ -881,34 +881,27 @@ def _max_array_types(artypes_list): # get the maximum number of each array type #pre-compute a dictionary caching completed circuits for layout construction performance. unique_circuits = list({ckt for circuit_list in circuit_lists for ckt in circuit_list}) - if isinstance(mdl.sim, _fwdsims.MatrixForwardSimulator): - precomp_layout_circuit_cache = _layouts.matrixlayout.create_matrix_copa_layout_circuit_cache(unique_circuits, mdl, dataset=dataset) - elif isinstance(mdl.sim, _fwdsims.MapForwardSimulator): - precomp_layout_circuit_cache = _layouts.maplayout.create_map_copa_layout_circuit_cache(unique_circuits, mdl, dataset=dataset) + if isinstance(mdl.sim, (_fwdsims.MatrixForwardSimulator, _fwdsims.MapForwardSimulator)): + precomp_layout_circuit_cache = mdl.sim.create_copa_layout_circuit_cache(unique_circuits, mdl, dataset=dataset) else: precomp_layout_circuit_cache = None for i, circuit_list in enumerate(circuit_lists): printer.log(f'Layout for iteration {i}', 2) - if isinstance(mdl.sim, (_fwdsims.MatrixForwardSimulator, _fwdsims.MapForwardSimulator)): - precomp_layouts.append(mdl.sim.create_layout(circuit_list, dataset, resource_alloc, array_types, verbosity= printer - 1, - layout_creation_circuit_cache = precomp_layout_circuit_cache)) - else: - precomp_layouts.append(mdl.sim.create_layout(circuit_list, dataset, resource_alloc, array_types, verbosity= printer - 1)) - + precomp_layouts.append(mdl.sim.create_layout(circuit_list, dataset, resource_alloc, array_types, verbosity= printer - 1, + layout_creation_circuit_cache = precomp_layout_circuit_cache)) + #precompute a cache of possible outcome counts for each circuits to accelerate MDC store creation if isinstance(mdl, _models.model.OpModel): if precomp_layout_circuit_cache is not None: #then grab the split circuits from there. 
expanded_circuit_outcome_list = mdl.bulk_expand_instruments_and_separate_povm(unique_circuits, completed_circuits= precomp_layout_circuit_cache['completed_circuits'].values()) - outcome_count_by_circuit_cache = {ckt: len(outcome_tup) for ckt,outcome_tup in zip(unique_circuits, expanded_circuit_outcome_list)} else: - expanded_circuit_outcome_list = mdl.bulk_expand_instruments_and_separate_povm(unique_circuits) - outcome_count_by_circuit_cache = {ckt: len(outcome_tup) for ckt,outcome_tup in zip(unique_circuits, expanded_circuit_outcome_list)} + expanded_circuit_outcome_list = mdl.bulk_expand_instruments_and_separate_povm(unique_circuits) + outcome_count_by_circuit_cache = {ckt: len(outcome_tup) for ckt,outcome_tup in zip(unique_circuits, expanded_circuit_outcome_list)} else: outcome_count_by_circuit_cache = {ckt: mdl.compute_num_outcomes(ckt) for ckt in unique_circuits} - with printer.progress_logging(1): for i in range(starting_index, len(circuit_lists)): circuitsToEstimate = circuit_lists[i] diff --git a/pygsti/forwardsims/mapforwardsim.py b/pygsti/forwardsims/mapforwardsim.py index 3cba839bc..ae52c241b 100644 --- a/pygsti/forwardsims/mapforwardsim.py +++ b/pygsti/forwardsims/mapforwardsim.py @@ -324,17 +324,17 @@ def create_copa_layout_circuit_cache(circuits, model, dataset=None): if dataset is not None: - aliases = circuits.op_label_aliases if isinstance(circuits, _CircuitList) else None - ds_circuits = _lt.apply_aliases_to_circuits(circuits, aliases) - unique_outcomes_list = [] - for ckt in ds_circuits: + outcomes_list = [] + for ckt in circuits: ds_row = dataset[ckt] - unique_outcomes_list.append(ds_row.unique_outcomes if ds_row is not None else None) + outcomes_list.append(ds_row.outcomes if ds_row is not None else None) + #slightly different than matrix, for some reason outcomes is used in this class + #and unique_outcomes is used in matrix. else: - unique_outcomes_list = [None]*len(circuits) + outcomes_list = [None]*len(circuits) expanded_circuit_outcome_list = model.bulk_expand_instruments_and_separate_povm(circuits, - observed_outcomes_list = unique_outcomes_list, + observed_outcomes_list = outcomes_list, completed_circuits= completed_circuits) expanded_circuit_cache = {ckt: expanded_ckt for ckt,expanded_ckt in zip(completed_circuits, expanded_circuit_outcome_list)} diff --git a/pygsti/forwardsims/matrixforwardsim.py b/pygsti/forwardsims/matrixforwardsim.py index 2952ddef0..222a1aa2e 100644 --- a/pygsti/forwardsims/matrixforwardsim.py +++ b/pygsti/forwardsims/matrixforwardsim.py @@ -1162,11 +1162,11 @@ def create_copa_layout_circuit_cache(circuits, model, dataset=None): cache['completed_circuits'] = {ckt: comp_ckt for ckt, comp_ckt in zip(circuits, completed_circuits)} cache['split_circuits'] = {ckt: split_ckt for ckt, split_ckt in zip(circuits, split_circuits)} + #There is some potential aliasing that happens in the init that I am not + #doing here, but I think 90+% of the time this ought to be fine. 
if dataset is not None: - aliases = circuits.op_label_aliases if isinstance(circuits, _CircuitList) else None - ds_circuits = _lt.apply_aliases_to_circuits(circuits, aliases) unique_outcomes_list = [] - for ckt in ds_circuits: + for ckt in circuits: ds_row = dataset[ckt] unique_outcomes_list.append(ds_row.unique_outcomes if ds_row is not None else None) else: diff --git a/pygsti/layouts/maplayout.py b/pygsti/layouts/maplayout.py index d0fb7a34d..7729ddff0 100644 --- a/pygsti/layouts/maplayout.py +++ b/pygsti/layouts/maplayout.py @@ -277,37 +277,3 @@ def _create_atom(group): for atom in self.atoms: for expanded_circuit_i, unique_i in atom.unique_indices_by_expcircuit.items(): atom.orig_indices_by_expcircuit[expanded_circuit_i] = unique_to_orig[unique_i] - - -def create_map_copa_layout_circuit_cache(circuits, model, dataset=None): - """ - Helper function for pre-computing/pre-processing circuits structures - used in matrix layout creation. - """ - cache = dict() - completed_circuits = model.complete_circuits(circuits) - - cache['completed_circuits'] = {ckt: comp_ckt for ckt, comp_ckt in zip(circuits, completed_circuits)} - - split_circuits = model.split_circuits(completed_circuits, split_prep=False) - cache['split_circuits'] = {ckt: split_ckt for ckt, split_ckt in zip(circuits, split_circuits)} - - - if dataset is not None: - outcomes_list = [] - for ckt in circuits: - ds_row = dataset[ckt] - outcomes_list.append(ds_row.outcomes if ds_row is not None else None) - #slightly different than matrix, for some reason outcomes is used in this class - #and unique_outcomes is used in matrix. - else: - outcomes_list = [None]*len(circuits) - - expanded_circuit_outcome_list = model.bulk_expand_instruments_and_separate_povm(circuits, - observed_outcomes_list = outcomes_list, - completed_circuits= completed_circuits) - - expanded_circuit_cache = {ckt: expanded_ckt for ckt,expanded_ckt in zip(completed_circuits, expanded_circuit_outcome_list)} - cache['expanded_and_separated_circuits'] = expanded_circuit_cache - - return cache diff --git a/pygsti/layouts/matrixlayout.py b/pygsti/layouts/matrixlayout.py index 09239f239..a5df2fee1 100644 --- a/pygsti/layouts/matrixlayout.py +++ b/pygsti/layouts/matrixlayout.py @@ -387,45 +387,6 @@ def _create_atom(args): _create_atom, list(zip(groups, helpful_scratch)), num_tree_processors, num_param_dimension_processors, param_dimensions, param_dimension_blk_sizes, resource_alloc, verbosity) - -def create_matrix_copa_layout_circuit_cache(circuits, model, dataset=None): - """ - Helper function for pre-computing/pre-processing circuits structures - used in matrix layout creation. - """ - cache = dict() - completed_circuits, split_circuits = model.complete_circuits(circuits, return_split=True) - - cache['completed_circuits'] = {ckt: comp_ckt for ckt, comp_ckt in zip(circuits, completed_circuits)} - cache['split_circuits'] = {ckt: split_ckt for ckt, split_ckt in zip(circuits, split_circuits)} - - #There is some potential aliasing that happens in the init that I am not - #doing here, but I think 90+% of the time this ought to be fine. 
- if dataset is not None: - unique_outcomes_list = [] - for ckt in circuits: - ds_row = dataset[ckt] - unique_outcomes_list.append(ds_row.unique_outcomes if ds_row is not None else None) - else: - unique_outcomes_list = [None]*len(circuits) - - expanded_circuit_outcome_list = model.bulk_expand_instruments_and_separate_povm(circuits, - observed_outcomes_list = unique_outcomes_list, - split_circuits = split_circuits) - - expanded_circuit_cache = {ckt: expanded_ckt for ckt,expanded_ckt in zip(circuits, expanded_circuit_outcome_list)} - - cache['expanded_and_separated_circuits'] = expanded_circuit_cache - - expanded_subcircuits_no_spam_cache = dict() - for expc_outcomes in cache['expanded_and_separated_circuits'].values(): - for sep_povm_c, _ in expc_outcomes.items(): # for each expanded cir from unique_i-th circuit - exp_nospam_c = sep_povm_c.circuit_without_povm[1:] - expanded_subcircuits_no_spam_cache[exp_nospam_c] = exp_nospam_c.expand_subcircuits() - - cache['expanded_subcircuits_no_spam'] = expanded_subcircuits_no_spam_cache - - return cache From bc6d6caedcea294c9563ec31f1b2aa8955a2f2c6 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Tue, 30 Jul 2024 20:29:37 -0600 Subject: [PATCH 22/41] Add in DataSet key aliasing Add in support for data set key aliasing in COPA layout cache creation. --- pygsti/forwardsims/mapforwardsim.py | 14 +++++++------- pygsti/forwardsims/matrixforwardsim.py | 6 +++--- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/pygsti/forwardsims/mapforwardsim.py b/pygsti/forwardsims/mapforwardsim.py index ae52c241b..3cba839bc 100644 --- a/pygsti/forwardsims/mapforwardsim.py +++ b/pygsti/forwardsims/mapforwardsim.py @@ -324,17 +324,17 @@ def create_copa_layout_circuit_cache(circuits, model, dataset=None): if dataset is not None: - outcomes_list = [] - for ckt in circuits: + aliases = circuits.op_label_aliases if isinstance(circuits, _CircuitList) else None + ds_circuits = _lt.apply_aliases_to_circuits(circuits, aliases) + unique_outcomes_list = [] + for ckt in ds_circuits: ds_row = dataset[ckt] - outcomes_list.append(ds_row.outcomes if ds_row is not None else None) - #slightly different than matrix, for some reason outcomes is used in this class - #and unique_outcomes is used in matrix. + unique_outcomes_list.append(ds_row.unique_outcomes if ds_row is not None else None) else: - outcomes_list = [None]*len(circuits) + unique_outcomes_list = [None]*len(circuits) expanded_circuit_outcome_list = model.bulk_expand_instruments_and_separate_povm(circuits, - observed_outcomes_list = outcomes_list, + observed_outcomes_list = unique_outcomes_list, completed_circuits= completed_circuits) expanded_circuit_cache = {ckt: expanded_ckt for ckt,expanded_ckt in zip(completed_circuits, expanded_circuit_outcome_list)} diff --git a/pygsti/forwardsims/matrixforwardsim.py b/pygsti/forwardsims/matrixforwardsim.py index 222a1aa2e..2952ddef0 100644 --- a/pygsti/forwardsims/matrixforwardsim.py +++ b/pygsti/forwardsims/matrixforwardsim.py @@ -1162,11 +1162,11 @@ def create_copa_layout_circuit_cache(circuits, model, dataset=None): cache['completed_circuits'] = {ckt: comp_ckt for ckt, comp_ckt in zip(circuits, completed_circuits)} cache['split_circuits'] = {ckt: split_ckt for ckt, split_ckt in zip(circuits, split_circuits)} - #There is some potential aliasing that happens in the init that I am not - #doing here, but I think 90+% of the time this ought to be fine. 
if dataset is not None: + aliases = circuits.op_label_aliases if isinstance(circuits, _CircuitList) else None + ds_circuits = _lt.apply_aliases_to_circuits(circuits, aliases) unique_outcomes_list = [] - for ckt in circuits: + for ckt in ds_circuits: ds_row = dataset[ckt] unique_outcomes_list.append(ds_row.unique_outcomes if ds_row is not None else None) else: From a79f2865add14448fae0452933540cf0235c1550 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Tue, 30 Jul 2024 21:41:57 -0600 Subject: [PATCH 23/41] Minor refactors and updates Rework some of the if statement branching in the layout creation to instead use fallback behavior of get more. --- pygsti/layouts/maplayout.py | 32 ++++++++--------- pygsti/layouts/matrixlayout.py | 66 +++++++++++++++------------------- 2 files changed, 43 insertions(+), 55 deletions(-) diff --git a/pygsti/layouts/maplayout.py b/pygsti/layouts/maplayout.py index 7729ddff0..501e440da 100644 --- a/pygsti/layouts/maplayout.py +++ b/pygsti/layouts/maplayout.py @@ -53,18 +53,17 @@ class _MapCOPALayoutAtom(_DistributableAtom): def __init__(self, unique_complete_circuits, ds_circuits, group, model, dataset, max_cache_size, expanded_complete_circuit_cache = None): - expanded_circuit_info_by_unique = _collections.OrderedDict() - expanded_circuit_set = _collections.OrderedDict() # only use SeparatePOVMCircuit keys as ordered set + expanded_circuit_info_by_unique = dict() + expanded_circuit_set = dict() # only use SeparatePOVMCircuit keys as ordered set + + if expanded_complete_circuit_cache is None: + expanded_complete_circuit_cache = dict() for i in group: - if expanded_complete_circuit_cache is None: - observed_outcomes = None if (dataset is None) else dataset[ds_circuits[i]].outcomes - d = model.expand_instruments_and_separate_povm(unique_complete_circuits[i], observed_outcomes) - else: - d = expanded_complete_circuit_cache.get(unique_complete_circuits[i], None) - if d is None: - observed_outcomes = None if (dataset is None) else dataset[ds_circuits[i]].outcomes - d = model.expand_instruments_and_separate_povm(unique_complete_circuits[i], observed_outcomes) + d = expanded_complete_circuit_cache.get(unique_complete_circuits[i], None) + if d is None: + unique_observed_outcomes = None if (dataset is None) else dataset[ds_circuits[i]].unique_outcomes + d = model.expand_instruments_and_separate_povm(unique_complete_circuits[i], unique_observed_outcomes) expanded_circuit_info_by_unique[i] = d # a dict of SeparatePOVMCircuits => tuples of outcome labels expanded_circuit_set.update(d) @@ -220,14 +219,11 @@ def __init__(self, circuits, model, dataset=None, max_cache_size=None, ds_circuits = _lt.apply_aliases_to_circuits(unique_circuits, aliases) #extract subcaches from layout_creation_circuit_cache: - if layout_creation_circuit_cache is not None: - self.completed_circuit_cache = layout_creation_circuit_cache.get('completed_circuits', None) - self.split_circuit_cache = layout_creation_circuit_cache.get('split_circuits', None) - self.expanded_and_separated_circuits_cache = layout_creation_circuit_cache.get('expanded_and_separated_circuits', None) - else: - self.completed_circuit_cache = None - self.split_circuit_cache = None - self.expanded_and_separated_circuits_cache = None + if layout_creation_circuit_cache is None: + layout_creation_circuit_cache = dict() + self.completed_circuit_cache = layout_creation_circuit_cache.get('completed_circuits', None) + self.split_circuit_cache = layout_creation_circuit_cache.get('split_circuits', None) + 
self.expanded_and_separated_circuits_cache = layout_creation_circuit_cache.get('expanded_and_separated_circuits', None) if self.completed_circuit_cache is None: unique_complete_circuits = model.complete_circuits(unique_circuits) diff --git a/pygsti/layouts/matrixlayout.py b/pygsti/layouts/matrixlayout.py index a5df2fee1..a37f4bc21 100644 --- a/pygsti/layouts/matrixlayout.py +++ b/pygsti/layouts/matrixlayout.py @@ -74,6 +74,9 @@ def __init__(self, unique_complete_circuits, unique_nospam_circuits, circuits_by ds_circuits, group, helpful_scratch, model, unique_circuits, dataset=None, expanded_and_separated_circuit_cache=None, double_expanded_nospam_circuits_cache = None): + if expanded_and_separated_circuit_cache is None: + expanded_and_separated_circuit_cache = dict() + #Note: group gives unique_nospam_circuits indices, which circuits_by_unique_nospam_circuits # turns into "unique complete circuit" indices, which the layout via it's to_unique can map # to original circuit indices. @@ -82,18 +85,13 @@ def add_expanded_circuits(indices, add_to_this_dict): for i in indices: nospam_c = unique_nospam_circuits[i] for unique_i in circuits_by_unique_nospam_circuits[nospam_c]: # "unique" circuits: add SPAM to nospam_c - if expanded_and_separated_circuit_cache is None: + #the cache is indexed into using the (potentially) incomplete circuits + expc_outcomes = expanded_and_separated_circuit_cache.get(unique_circuits[unique_i], None) + if expc_outcomes is None: #fall back on original non-cache behavior. observed_outcomes = None if (dataset is None) else dataset[ds_circuits[unique_i]].unique_outcomes expc_outcomes = model.expand_instruments_and_separate_povm(unique_complete_circuits[unique_i], observed_outcomes) - #Note: unique_complete_circuits may have duplicates (they're only unique *pre*-completion) - else: - #the cache is indexed into using the (potentially) incomplete circuits - expc_outcomes = expanded_and_separated_circuit_cache.get(unique_circuits[unique_i], None) - if expc_outcomes is None: #fall back on original non-cache behavior. - observed_outcomes = None if (dataset is None) else dataset[ds_circuits[unique_i]].unique_outcomes - expc_outcomes = model.expand_instruments_and_separate_povm(unique_complete_circuits[unique_i], observed_outcomes) - #and add this new value to the cache. - expanded_and_separated_circuit_cache[unique_circuits[unique_i]] = expc_outcomes + #and add this new value to the cache. + expanded_and_separated_circuit_cache[unique_circuits[unique_i]] = expc_outcomes for sep_povm_c, outcomes in expc_outcomes.items(): # for each expanded cir from unique_i-th circuit prep_lbl = sep_povm_c.circuit_without_povm[0] exp_nospam_c = sep_povm_c.circuit_without_povm[1:] # sep_povm_c *always* has prep lbl @@ -130,21 +128,16 @@ def add_expanded_circuits(indices, add_to_this_dict): expanded_nospam_circuits_plus_scratch = {i:cir for i, cir in enumerate(expanded_nospam_circuit_outcomes_plus_scratch.keys())} else: expanded_nospam_circuits_plus_scratch = expanded_nospam_circuits.copy() - + + if double_expanded_nospam_circuits_cache is None: + double_expanded_nospam_circuits_cache = dict() double_expanded_nospam_circuits_plus_scratch = dict() - if double_expanded_nospam_circuits_cache is not None: - for i, cir in expanded_nospam_circuits_plus_scratch.items(): - # expand sub-circuits for a more efficient tree - double_expanded_ckt = double_expanded_nospam_circuits_cache.get(cir, None) - if double_expanded_ckt is None: #Fall back to standard behavior and do expansion. 
- double_expanded_nospam_circuits_plus_scratch[i] = cir.expand_subcircuits() - else: - double_expanded_nospam_circuits_plus_scratch[i] = double_expanded_ckt - else: - for i, cir in expanded_nospam_circuits_plus_scratch.items(): - # expand sub-circuits for a more efficient tree - double_expanded_nospam_circuits_plus_scratch[i] = cir.expand_subcircuits() - + for i, cir in expanded_nospam_circuits_plus_scratch.items(): + # expand sub-circuits for a more efficient tree + double_expanded_ckt = double_expanded_nospam_circuits_cache.get(cir, None) + if double_expanded_ckt is None: #Fall back to standard behavior and do expansion. + double_expanded_ckt = cir.expand_subcircuits() + double_expanded_nospam_circuits_plus_scratch[i] = double_expanded_ckt self.tree = _EvalTree.create(double_expanded_nospam_circuits_plus_scratch) #print("Atom tree: %d circuits => tree of size %d" % (len(expanded_nospam_circuits), len(self.tree))) @@ -313,16 +306,12 @@ def __init__(self, circuits, model, dataset=None, num_sub_trees=None, num_tree_p ds_circuits = _lt.apply_aliases_to_circuits(unique_circuits, aliases) #extract subcaches from layout_creation_circuit_cache: - if layout_creation_circuit_cache is not None: - self.completed_circuit_cache = layout_creation_circuit_cache.get('completed_circuits', None) - self.split_circuit_cache = layout_creation_circuit_cache.get('split_circuits', None) - self.expanded_and_separated_circuits_cache = layout_creation_circuit_cache.get('expanded_and_separated_circuits', None) - self.expanded_subcircuits_no_spam_cache = layout_creation_circuit_cache.get('expanded_subcircuits_no_spam', None) - else: - self.completed_circuit_cache = None - self.split_circuit_cache = None - self.expanded_and_separated_circuits_cache = None - self.expanded_subcircuits_no_spam_cache = None + if layout_creation_circuit_cache is None: + layout_creation_circuit_cache = dict() + self.completed_circuit_cache = layout_creation_circuit_cache.get('completed_circuits', None) + self.split_circuit_cache = layout_creation_circuit_cache.get('split_circuits', None) + self.expanded_and_separated_circuits_cache = layout_creation_circuit_cache.get('expanded_and_separated_circuits', None) + self.expanded_subcircuits_no_spam_cache = layout_creation_circuit_cache.get('expanded_subcircuits_no_spam', None) if self.completed_circuit_cache is None: unique_complete_circuits, split_unique_circuits = model.complete_circuits(unique_circuits, return_split=True) @@ -337,7 +326,7 @@ def __init__(self, circuits, model, dataset=None, num_sub_trees=None, num_tree_p #Note: "unique" means a unique circuit *before* circuit-completion, so there could be duplicate # "unique circuits" after completion, e.g. "rho0Gx" and "Gx" could both complete to "rho0GxMdefault_0". - circuits_by_unique_nospam_circuits = _collections.OrderedDict() + circuits_by_unique_nospam_circuits = dict() if self.completed_circuit_cache is None: for i, (_, nospam_c, _) in enumerate(split_unique_circuits): if nospam_c in circuits_by_unique_nospam_circuits: @@ -345,12 +334,15 @@ def __init__(self, circuits, model, dataset=None, num_sub_trees=None, num_tree_p else: circuits_by_unique_nospam_circuits[nospam_c] = [i] #also create the split circuit cache at this point for future use. 
- self.split_circuit_cache = {unique_ckt:split_ckt for unique_ckt, split_ckt in zip(unique_circuits, split_unique_circuits)} + if self.split_circuit_cache is None: + self.split_circuit_cache = {unique_ckt:split_ckt for unique_ckt, split_ckt in zip(unique_circuits, split_unique_circuits)} else: + if self.split_circuit_cache is None: + self.split_circuit_cache = dict() for i, (c_unique_complete, c_unique) in enumerate(zip(unique_complete_circuits, unique_circuits)): split_ckt_tup = self.split_circuit_cache.get(c_unique, None) - nospam_c= split_ckt_tup[1] + nospam_c= split_ckt_tup[1] if split_ckt_tup is not None else None if nospam_c is None: split_ckt_tup = model.split_circuit(c_unique_complete) nospam_c= split_ckt_tup[1] From c6ed8ee0191d17dd163fc704b6a2ce13d25f3953 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Tue, 30 Jul 2024 21:42:45 -0600 Subject: [PATCH 24/41] Unrelated RB testing fix I accidentally put down the wrong directory for temp testing files in the RB testing code. --- test/unit/protocols/test_rb.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/test/unit/protocols/test_rb.py b/test/unit/protocols/test_rb.py index 9f461f098..7f531401c 100644 --- a/test/unit/protocols/test_rb.py +++ b/test/unit/protocols/test_rb.py @@ -114,9 +114,9 @@ def test_serialization(self): citerations=self.citerations, compilerargs=self.compiler_args, seed=self.seed, verbosity=self.verbosity, num_processes=1) - crb_design.write(f'{FILE_PATH}/../../test_packages/temp_test_files/test_CliffordRBDesign_serialization') + crb_design.write('../../test/test_packages/temp_test_files/test_CliffordRBDesign_serialization') #then read this back in - crb_design_read = _rb.CliffordRBDesign.from_dir(f'{FILE_PATH}/../../test_packages/temp_test_files/test_CliffordRBDesign_serialization') + crb_design_read = _rb.CliffordRBDesign.from_dir('../../test/test_packages/temp_test_files/test_CliffordRBDesign_serialization') self.assertEqual(crb_design.all_circuits_needing_data, crb_design_read.all_circuits_needing_data) self.assertEqual(crb_design.interleaved_circuit, crb_design_read.interleaved_circuit) @@ -166,9 +166,9 @@ def test_combined_design_access(self): def test_serialization(self): - self.irb_design.write(f'{FILE_PATH}/../../test_packages/temp_test_files/test_InterleavedRBDesign_serialization') + self.irb_design.write('../../test/test_packages/temp_test_files/test_InterleavedRBDesign_serialization') #then read this back in - irb_design_read = _rb.InterleavedRBDesign.from_dir(f'{FILE_PATH}/../../test_packages/temp_test_files/test_InterleavedRBDesign_serialization') + irb_design_read = _rb.InterleavedRBDesign.from_dir('../../test/test_packages/temp_test_files/test_InterleavedRBDesign_serialization') self.assertEqual(self.irb_design.all_circuits_needing_data, irb_design_read.all_circuits_needing_data) self.assertEqual(self.irb_design['crb'].all_circuits_needing_data, irb_design_read['crb'].all_circuits_needing_data) @@ -251,9 +251,9 @@ def test_serialization(self): conditionaltwirl=True, citerations=self.citerations, compilerargs=self.compiler_args, partitioned=False, seed=self.seed, verbosity=self.verbosity, num_processes=1) - drb_design.write(f'{FILE_PATH}/../../test_packages/temp_test_files/test_DirectRBDesign_serialization') + drb_design.write('../../test/test_packages/temp_test_files/test_DirectRBDesign_serialization') #then read this back in - drb_design_read = 
_rb.DirectRBDesign.from_dir(f'{FILE_PATH}/../../test_packages/temp_test_files/test_DirectRBDesign_serialization') + drb_design_read = _rb.DirectRBDesign.from_dir('../../test/test_packages/temp_test_files/test_DirectRBDesign_serialization') self.assertEqual(drb_design.all_circuits_needing_data, drb_design_read.all_circuits_needing_data) @@ -378,9 +378,9 @@ def test_serialization(self): localclifford=True, paulirandomize=True, seed=self.seed, verbosity=self.verbosity, num_processes=1) - mrb_design.write(f'{FILE_PATH}/../../test_packages/temp_test_files/test_MirrorRBDesign_serialization') + mrb_design.write('../../test/test_packages/temp_test_files/test_MirrorRBDesign_serialization') #then read this back in - mrb_design_read = _rb.MirrorRBDesign.from_dir(f'{FILE_PATH}/../../test_packages/temp_test_files/test_MirrorRBDesign_serialization') + mrb_design_read = _rb.MirrorRBDesign.from_dir('../../test/test_packages/temp_test_files/test_MirrorRBDesign_serialization') self.assertEqual(mrb_design.all_circuits_needing_data, mrb_design_read.all_circuits_needing_data) @@ -427,9 +427,9 @@ def test_serialization(self): sampler=self.sampler, samplerargs=self.samplerargs, seed=self.seed, verbosity=0) - birb_design.write(f'{FILE_PATH}/../../test_packages/temp_test_files/test_BinaryRBDesign_serialization') + birb_design.write('../../test/test_packages/temp_test_files/test_BinaryRBDesign_serialization') #then read this back in - birb_design_read = _rb.BinaryRBDesign.from_dir(f'{FILE_PATH}/../../test_packages/temp_test_files/test_BinaryRBDesign_serialization') + birb_design_read = _rb.BinaryRBDesign.from_dir('../../test/test_packages/temp_test_files/test_BinaryRBDesign_serialization') self.assertEqual(birb_design.all_circuits_needing_data, birb_design_read.all_circuits_needing_data) @@ -536,8 +536,8 @@ def test_cliffordrb_protocol_ideal(self): self.assertTrue(abs(result.fits['A-fixed'].estimates['r'])<=3e-5) #also test writing and reading the results from disk. - result.write(f'{FILE_PATH}/../../test_packages/temp_test_files/test_RandomizedBenchmarking_results') - result_read = pygsti.io.read_results_from_dir(f'{FILE_PATH}/../../test_packages/temp_test_files/test_RandomizedBenchmarking_results') + result.write('../../test/test_packages/temp_test_files/test_RandomizedBenchmarking_results') + result_read = pygsti.io.read_results_from_dir('../../test/test_packages/temp_test_files/test_RandomizedBenchmarking_results') def test_cliffordrb_protocol_noisy(self): proto = pygsti.protocols.rb.RandomizedBenchmarking(datatype='success_probabilities', defaultfit='A-fixed', rtype='EI', @@ -706,8 +706,8 @@ def test_interleavedrb_protocol_ideal(self): self.assertTrue(abs(estimated_irb_num) <= 1e-5) #also test writing and reading the results from disk. - result.write(f'{FILE_PATH}/../../test_packages/temp_test_files/test_InterleavedRandomizedBenchmarking_results') - result_read = pygsti.io.read_results_from_dir(f'{FILE_PATH}/../../test_packages/temp_test_files/test_InterleavedRandomizedBenchmarking_results') + result.write('../../test/test_packages/temp_test_files/test_InterleavedRandomizedBenchmarking_results') + result_read = pygsti.io.read_results_from_dir('../../test/test_packages/temp_test_files/test_InterleavedRandomizedBenchmarking_results') def test_interleavedrb_protocol_noisy(self): From 9ec8c878ab35ea103ae878d107548111f16c9562 Mon Sep 17 00:00:00 2001 From: "Stefan K. Seritan" Date: Thu, 19 Sep 2024 10:25:43 -0700 Subject: [PATCH 25/41] Make test_rb paths absolute. 
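For clarity, the FILE_PATH constant referenced in the diff below is presumably the usual absolute-path anchor for the test module; a minimal sketch of the assumed definition, since it is not shown in this hunk:

    import os

    # Assumed definition near the top of test/unit/protocols/test_rb.py: the absolute
    # directory containing the test module, so the temp_test_files paths resolve the
    # same way regardless of the working directory pytest is launched from.
    FILE_PATH = os.path.dirname(os.path.abspath(__file__))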
--- test/unit/protocols/test_rb.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/test/unit/protocols/test_rb.py b/test/unit/protocols/test_rb.py index 7f531401c..9f461f098 100644 --- a/test/unit/protocols/test_rb.py +++ b/test/unit/protocols/test_rb.py @@ -114,9 +114,9 @@ def test_serialization(self): citerations=self.citerations, compilerargs=self.compiler_args, seed=self.seed, verbosity=self.verbosity, num_processes=1) - crb_design.write('../../test/test_packages/temp_test_files/test_CliffordRBDesign_serialization') + crb_design.write(f'{FILE_PATH}/../../test_packages/temp_test_files/test_CliffordRBDesign_serialization') #then read this back in - crb_design_read = _rb.CliffordRBDesign.from_dir('../../test/test_packages/temp_test_files/test_CliffordRBDesign_serialization') + crb_design_read = _rb.CliffordRBDesign.from_dir(f'{FILE_PATH}/../../test_packages/temp_test_files/test_CliffordRBDesign_serialization') self.assertEqual(crb_design.all_circuits_needing_data, crb_design_read.all_circuits_needing_data) self.assertEqual(crb_design.interleaved_circuit, crb_design_read.interleaved_circuit) @@ -166,9 +166,9 @@ def test_combined_design_access(self): def test_serialization(self): - self.irb_design.write('../../test/test_packages/temp_test_files/test_InterleavedRBDesign_serialization') + self.irb_design.write(f'{FILE_PATH}/../../test_packages/temp_test_files/test_InterleavedRBDesign_serialization') #then read this back in - irb_design_read = _rb.InterleavedRBDesign.from_dir('../../test/test_packages/temp_test_files/test_InterleavedRBDesign_serialization') + irb_design_read = _rb.InterleavedRBDesign.from_dir(f'{FILE_PATH}/../../test_packages/temp_test_files/test_InterleavedRBDesign_serialization') self.assertEqual(self.irb_design.all_circuits_needing_data, irb_design_read.all_circuits_needing_data) self.assertEqual(self.irb_design['crb'].all_circuits_needing_data, irb_design_read['crb'].all_circuits_needing_data) @@ -251,9 +251,9 @@ def test_serialization(self): conditionaltwirl=True, citerations=self.citerations, compilerargs=self.compiler_args, partitioned=False, seed=self.seed, verbosity=self.verbosity, num_processes=1) - drb_design.write('../../test/test_packages/temp_test_files/test_DirectRBDesign_serialization') + drb_design.write(f'{FILE_PATH}/../../test_packages/temp_test_files/test_DirectRBDesign_serialization') #then read this back in - drb_design_read = _rb.DirectRBDesign.from_dir('../../test/test_packages/temp_test_files/test_DirectRBDesign_serialization') + drb_design_read = _rb.DirectRBDesign.from_dir(f'{FILE_PATH}/../../test_packages/temp_test_files/test_DirectRBDesign_serialization') self.assertEqual(drb_design.all_circuits_needing_data, drb_design_read.all_circuits_needing_data) @@ -378,9 +378,9 @@ def test_serialization(self): localclifford=True, paulirandomize=True, seed=self.seed, verbosity=self.verbosity, num_processes=1) - mrb_design.write('../../test/test_packages/temp_test_files/test_MirrorRBDesign_serialization') + mrb_design.write(f'{FILE_PATH}/../../test_packages/temp_test_files/test_MirrorRBDesign_serialization') #then read this back in - mrb_design_read = _rb.MirrorRBDesign.from_dir('../../test/test_packages/temp_test_files/test_MirrorRBDesign_serialization') + mrb_design_read = _rb.MirrorRBDesign.from_dir(f'{FILE_PATH}/../../test_packages/temp_test_files/test_MirrorRBDesign_serialization') self.assertEqual(mrb_design.all_circuits_needing_data, mrb_design_read.all_circuits_needing_data) @@ -427,9 +427,9 @@ def 
test_serialization(self): sampler=self.sampler, samplerargs=self.samplerargs, seed=self.seed, verbosity=0) - birb_design.write('../../test/test_packages/temp_test_files/test_BinaryRBDesign_serialization') + birb_design.write(f'{FILE_PATH}/../../test_packages/temp_test_files/test_BinaryRBDesign_serialization') #then read this back in - birb_design_read = _rb.BinaryRBDesign.from_dir('../../test/test_packages/temp_test_files/test_BinaryRBDesign_serialization') + birb_design_read = _rb.BinaryRBDesign.from_dir(f'{FILE_PATH}/../../test_packages/temp_test_files/test_BinaryRBDesign_serialization') self.assertEqual(birb_design.all_circuits_needing_data, birb_design_read.all_circuits_needing_data) @@ -536,8 +536,8 @@ def test_cliffordrb_protocol_ideal(self): self.assertTrue(abs(result.fits['A-fixed'].estimates['r'])<=3e-5) #also test writing and reading the results from disk. - result.write('../../test/test_packages/temp_test_files/test_RandomizedBenchmarking_results') - result_read = pygsti.io.read_results_from_dir('../../test/test_packages/temp_test_files/test_RandomizedBenchmarking_results') + result.write(f'{FILE_PATH}/../../test_packages/temp_test_files/test_RandomizedBenchmarking_results') + result_read = pygsti.io.read_results_from_dir(f'{FILE_PATH}/../../test_packages/temp_test_files/test_RandomizedBenchmarking_results') def test_cliffordrb_protocol_noisy(self): proto = pygsti.protocols.rb.RandomizedBenchmarking(datatype='success_probabilities', defaultfit='A-fixed', rtype='EI', @@ -706,8 +706,8 @@ def test_interleavedrb_protocol_ideal(self): self.assertTrue(abs(estimated_irb_num) <= 1e-5) #also test writing and reading the results from disk. - result.write('../../test/test_packages/temp_test_files/test_InterleavedRandomizedBenchmarking_results') - result_read = pygsti.io.read_results_from_dir('../../test/test_packages/temp_test_files/test_InterleavedRandomizedBenchmarking_results') + result.write(f'{FILE_PATH}/../../test_packages/temp_test_files/test_InterleavedRandomizedBenchmarking_results') + result_read = pygsti.io.read_results_from_dir(f'{FILE_PATH}/../../test_packages/temp_test_files/test_InterleavedRandomizedBenchmarking_results') def test_interleavedrb_protocol_noisy(self): From 599faa0437ec27ad9037bc142dfd7e79f10669d3 Mon Sep 17 00:00:00 2001 From: "Stefan K. Seritan" Date: Thu, 19 Sep 2024 14:35:14 -0700 Subject: [PATCH 26/41] Fix beta tests. --- pygsti/forwardsims/mapforwardsim.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pygsti/forwardsims/mapforwardsim.py b/pygsti/forwardsims/mapforwardsim.py index 3cba839bc..501c1f855 100644 --- a/pygsti/forwardsims/mapforwardsim.py +++ b/pygsti/forwardsims/mapforwardsim.py @@ -226,7 +226,8 @@ def create_layout(self, circuits, dataset=None, resource_alloc=None, array_types Determines how much output to send to stdout. 0 means no output, higher integers mean more output. - A precomputed dictionary serving as a cache for completed + layout_creation_circuit_cache: + A precomputed dictionary serving as a cache for completed circuits. I.e. circuits with prep labels and POVM labels appended. Along with other useful pre-computed circuit structures used in layout creation. From 4d40eb7f8caf0411f83da5beb9ab30cfac3c2262 Mon Sep 17 00:00:00 2001 From: "Stefan K. Seritan" Date: Thu, 19 Sep 2024 15:33:49 -0700 Subject: [PATCH 27/41] Merge resolution with #488. 
--- pygsti/models/model.py | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/pygsti/models/model.py b/pygsti/models/model.py index 4f0c5bd9c..d85964223 100644 --- a/pygsti/models/model.py +++ b/pygsti/models/model.py @@ -490,13 +490,6 @@ def __setstate__(self, state_dict): ## Get/Set methods ########################################## - @property - def parameter_labels(self): - """ - A list of labels, usually of the form `(op_label, string_description)` describing this model's parameters. - """ - return self._ops_paramlbls_to_model_paramlbls(self._paramlbls) - @property def sim(self): """ Forward simulator for this model """ @@ -611,14 +604,14 @@ def num_params(self): """ self._clean_paramvec() return len(self._paramvec) - + @property def parameter_labels(self): """ A list of labels, usually of the form `(op_label, string_description)` describing this model's parameters. """ self._clean_paramvec() - return self._paramlbls + return self._ops_paramlbls_to_model_paramlbls(self._paramlbls) def set_parameter_label(self, index, label): """ From bb9a278badcb7e807ab4a3b7e9b0f8f308e1c691 Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Wed, 22 May 2024 17:02:05 -0400 Subject: [PATCH 28/41] main changes (breaks some calling functions elsewhere) --- pygsti/tools/matrixtools.py | 107 ++++++++++++++++++++++++++---------- 1 file changed, 78 insertions(+), 29 deletions(-) diff --git a/pygsti/tools/matrixtools.py b/pygsti/tools/matrixtools.py index 94940c45c..605cb8b78 100644 --- a/pygsti/tools/matrixtools.py +++ b/pygsti/tools/matrixtools.py @@ -64,6 +64,20 @@ def gram_matrix(m, adjoint=False): return out +def is_normal(m, tol=1e-9): + """ + Test whether m is a normal operator, in the sense that it commutes with its adjoint. + """ + if m.shape[0] != m.shape[1]: + return False + prefix_char, _, _ = _spl.blas.find_best_blas_type(dtype=m.dtype) + herk = BLAS_FUNCS["herk"][prefix_char] + trans = 2 if _np.iscomplexobj(m) else 1 + mdagm = herk( 1.0, m, trans=trans ) + mmdag = herk( -1.0, m, trans=0, c=mdagm, overwrite_c=True ) + return _np.all(_np.abs(mmdag) <= tol) + + def is_hermitian(mx, tol=1e-9): """ Test whether mx is a hermitian matrix. @@ -134,8 +148,7 @@ def is_valid_density_mx(mx, tol=1e-9): bool True if mx is a valid density matrix, otherwise False. """ - # is_pos_def includes a check that the matrix is Hermitian. - return abs(_np.trace(mx) - 1.0) < tol and is_pos_def(mx, tol) + return abs(_np.trace(mx) - 1.0) < tol and is_hermitian(mx, tol) and is_pos_def(mx, tol) def nullspace(m, tol=1e-7): @@ -192,6 +205,7 @@ def nullspace_qr(m, tol=1e-7): return q[:, rank:] +#TODO: remove the orthogonalize argument (requires changing functions that call this one) def nice_nullspace(m, tol=1e-7, orthogonalize=False): """ Computes the nullspace of a matrix, and tries to return a "nice" basis for it. @@ -214,19 +228,21 @@ def nice_nullspace(m, tol=1e-7, orthogonalize=False): ------- An matrix of shape (M,K) whose columns contain nullspace basis vectors. 
""" - nullsp = nullspace(m, tol) - dim_ker = nullsp.shape[1] - if dim_ker == 0: - return nullsp # empty 0-by-N array - _, _, p = _spl.qr(nullsp.T.conj(), mode='raw', pivoting=True) - ret = nullsp @ (nullsp.T[:, p[:dim_ker]]).conj() - # ^ That's equivalent to, but faster than: - # nullsp_projector = nullsp @ nullsp.T.conj() - # _, _, p = _spl.qr(nullsp_projector mode='raw', pivoting=True) - # ret = nullsp_projector[:, p[:dim_ker]] - - if orthogonalize: - ret, _ = _spl.qr(ret, mode='economic') + + # + # nullsp = nullspace(m, tol) + # dim_ker = nullsp.shape[1] + # _, _, p = _spl.qr(nullsp.T.conj(), mode='raw', pivoting=True) + # ret = nullsp @ (nullsp.T[:, p[dim_ker]]).conj() + # + ## ^ Equivalent to, but faster than the following + ## + ## nullsp_projector = nullsp @ nullsp.T.conj() + ## ret = nullsp_projector[:, p[:dim_ker]] + ## + # + + ret = nullspace(m, tol) for j in range(ret.shape[1]): # normalize columns so largest element is +1.0 imax = _np.argmax(_np.abs(ret[:, j])) if abs(ret[imax, j]) > 1e-6: @@ -235,7 +251,7 @@ def nice_nullspace(m, tol=1e-7, orthogonalize=False): return ret -def normalize_columns(m, return_norms=False, ord=None): +def normalize_columns(m, return_norms=False, norm_ord=None): """ Normalizes the columns of a matrix. @@ -248,7 +264,7 @@ def normalize_columns(m, return_norms=False, ord=None): If `True`, also return a 1D array containing the norms of the columns (before they were normalized). - ord : int or list of ints, optional + norm_ord : int or list of ints, optional The order of the norm. See :func:`numpy.linalg.norm`. An array of orders can be given to specify the norm on a per-column basis. @@ -262,13 +278,13 @@ def normalize_columns(m, return_norms=False, ord=None): Only returned when `return_norms=True`, a 1-dimensional array of the pre-normalization norm of each column. """ - norms = column_norms(m, ord) + norms = column_norms(m, norm_ord) norms[norms == 0.0] = 1.0 # avoid division of zero-column by zero normalized_m = scale_columns(m, 1 / norms) return (normalized_m, norms) if return_norms else normalized_m -def column_norms(m, ord=None): +def column_norms(m, norm_ord=None): """ Compute the norms of the columns of a matrix. @@ -288,14 +304,14 @@ def column_norms(m, ord=None): A 1-dimensional array of the column norms (length is number of columns of `m`). """ if _sps.issparse(m): - ord_list = ord if isinstance(ord, (list, _np.ndarray)) else [ord] * m.shape[1] + ord_list = norm_ord if isinstance(norm_ord, (list, _np.ndarray)) else [norm_ord] * m.shape[1] assert(len(ord_list) == m.shape[1]) norms = _np.array([_np.linalg.norm(m[:, j].toarray(), ord=o) for j, o in enumerate(ord_list)]) - elif isinstance(ord, (list, _np.ndarray)): - assert(len(ord) == m.shape[1]) - norms = _np.array([_np.linalg.norm(m[:, j], ord=o) for j, o in enumerate(ord)]) + elif isinstance(norm_ord, (list, _np.ndarray)): + assert(len(norm_ord) == m.shape[1]) + norms = _np.array([_np.linalg.norm(m[:, j], ord=o) for j, o in enumerate(norm_ord)]) else: - norms = _np.linalg.norm(m, axis=0, ord=ord) + norms = _np.linalg.norm(m, axis=0, ord=norm_ord) return norms @@ -417,8 +433,6 @@ def independent_columns(m, initial_independent_cols=None, tol=1e-7): if initial_independent_cols is None: proj_m = m.copy() else: - # We assume initial_independent_cols is full column-rank. - # This lets us use unpivoted QR instead of pivoted QR or SVD. 
assert initial_independent_cols.shape[0] == m.shape[0] q = _spl.qr(initial_independent_cols, mode='econ')[0] # proj_m = (I - qq')m @@ -642,6 +656,7 @@ def mx_to_string_complex(m, real_width=9, im_width=9, prec=4): return s +#TODO: revert changes in the function below. def unitary_superoperator_matrix_log(m, mx_basis): """ Construct the logarithm of superoperator matrix `m`. @@ -671,11 +686,16 @@ def unitary_superoperator_matrix_log(m, mx_basis): from . import lindbladtools as _lt # (would create circular imports if at top) from . import optools as _ot # (would create circular imports if at top) + # Riley question: what assumptions do we have for the input m? The call to eigvals + # below is intended for fully-general matrices. I imagine we (typically) have structure + # that makes it preferable to call some other function (li) M_std = change_basis(m, mx_basis, "std") evals = _np.linalg.eigvals(M_std) - assert(_np.allclose(_np.abs(evals), 1.0)) # simple but technically incomplete check for a unitary superop - # (e.g. could be anti-unitary: diag(1, -1, -1, -1)) - + assert(_np.allclose(_np.abs(evals), 1.0)) + # ^ simple but technically incomplete check for a unitary superop + # (e.g. could be anti-unitary: diag(1, -1, -1, -1)) + + # ^ Riley question: U = _ot.std_process_mx_to_unitary(M_std) H = _spl.logm(U) / -1j # U = exp(-iH) logM_std = _lt.create_elementary_errorgen('H', H) # rho --> -i[H, rho] @@ -1424,6 +1444,35 @@ def _findx(a, inds, always_copy=False): return a_inds +# TODO: reevaluate the need for this function. It seems like we could just in-line @ +# and let operator overloading and implementations of __matmul__ and __rmatmul__ +# handle it. +def safe_dot(a, b): + """ + Performs dot(a,b) correctly when neither, either, or both arguments are sparse matrices. + + Parameters + ---------- + a : numpy.ndarray or scipy.sparse matrix. + First matrix. + + b : numpy.ndarray or scipy.sparse matrix. + Second matrix. + + Returns + ------- + numpy.ndarray or scipy.sparse matrix + """ + if _sps.issparse(a): + return a.dot(b) # sparseMx.dot works for both sparse and dense args + elif _sps.issparse(b): + # to return a sparse mx even when a is dense (asymmetric behavior): + # --> return _sps.csr_matrix(a).dot(b) # numpyMx.dot can't handle sparse argument + return _np.dot(a, b.toarray()) + else: + return _np.dot(a, b) + + def safe_norm(a, part=None): """ Get the frobenius norm of a matrix or vector, `a`, when it is either a dense array or a sparse matrix. From cf7dcd16b3e05515d95b25736e0255dd88e4e743 Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Thu, 23 May 2024 10:16:27 -0400 Subject: [PATCH 29/41] check in --- pygsti/extras/interpygate/__init__.py | 13 ++----- .../extras/interpygate/process_tomography.py | 39 +++---------------- pygsti/tools/matrixtools.py | 18 ++++----- 3 files changed, 18 insertions(+), 52 deletions(-) diff --git a/pygsti/extras/interpygate/__init__.py b/pygsti/extras/interpygate/__init__.py index 1155ee3f1..f126dee97 100644 --- a/pygsti/extras/interpygate/__init__.py +++ b/pygsti/extras/interpygate/__init__.py @@ -11,14 +11,9 @@ from .core import PhysicalProcess, InterpolatedDenseOp, InterpolatedOpFactory from .process_tomography import vec, unvec, run_process_tomography -# Note from Riley on September, 2024: +# Note from Riley on May 22, 2024: # -# vec is deprecated, and shouldn't be called anywhere in the codebase. 
-# -# unvec is deprecated and replaced with unvec_square; the latter function -# isn't imported here because we don't want people to access it just from -# the pygsti.extras.interpygate namespace. -# -# Ideally we'd remove vec and unvec from the pygsti.extras.interpygate namespace -# and only have them available in pygsti.extras.interpygate.process_tomography. +# I wanted to remove the implementations of vec and unvec and just in-line equivalent +# code in the few places they were used. However, the fact that they're included in this +# __init__.py file suggests that they might be used outside of pyGSTi itself. # diff --git a/pygsti/extras/interpygate/process_tomography.py b/pygsti/extras/interpygate/process_tomography.py index fba79adb6..9d775f00c 100644 --- a/pygsti/extras/interpygate/process_tomography.py +++ b/pygsti/extras/interpygate/process_tomography.py @@ -34,7 +34,7 @@ def vec(matrix): """ matrix = _np.array(matrix) if matrix.shape == (len(matrix), len(matrix)): - return matrix.reshape(shape=(matrix.size, 1), order='F') + return matrix.reshape((-1, 1), order='F') else: raise ValueError('The input matrix must be square.') @@ -54,39 +54,10 @@ def unvec(vectorized): ValueError: If the length of the input is not a perfect square """ - return unvec_square(vectorized, order='F') - - -def unvec_square(vectorized, order): - """ - Takes a vector whose length is a perfect square, and returns a square matrix - representation by reading from the vectors entries to define the matrix in - column-major order (order='F') or row-major order (order='C'). - - Args: - vectorized: array-like, where np.array(vectorized).size is a perfect square. - order: 'F' or 'C' - - Returns: - numpy.ndarray: NxN dimensional array - - Raises: - ValueError: If the length of the input is not a perfect square. - - """ - assert order == 'F' or order == 'C' - if not isinstance(vectorized, _np.ndarray): - vectorized = _np.array(vectorized) - - if vectorized.ndim == 2: - assert min(vectorized.shape) == 1 - vectorized = vectorized.ravel() - elif vectorized.ndim > 2: - raise ValueError('vectorized.ndim must be <= 2.') - - n = int(_np.sqrt(max(vectorized.shape))) - if len(vectorized) == n ** 2: - return vectorized.reshape((n, n), order=order) + vectorized = _np.array(vectorized) + dim = int(_np.sqrt(max(vectorized.shape))) + if len(vectorized) == dim ** 2: + return vectorized.reshape((dim, dim), order='F') else: msg = 'The input vector length must be a perfect square, but this input has length %d.' % len(vectorized) raise ValueError(msg) diff --git a/pygsti/tools/matrixtools.py b/pygsti/tools/matrixtools.py index 605cb8b78..60a10b0b3 100644 --- a/pygsti/tools/matrixtools.py +++ b/pygsti/tools/matrixtools.py @@ -251,7 +251,7 @@ def nice_nullspace(m, tol=1e-7, orthogonalize=False): return ret -def normalize_columns(m, return_norms=False, norm_ord=None): +def normalize_columns(m, return_norms=False, ord=None): """ Normalizes the columns of a matrix. @@ -264,7 +264,7 @@ def normalize_columns(m, return_norms=False, norm_ord=None): If `True`, also return a 1D array containing the norms of the columns (before they were normalized). - norm_ord : int or list of ints, optional + ord : int or list of ints, optional The order of the norm. See :func:`numpy.linalg.norm`. An array of orders can be given to specify the norm on a per-column basis. @@ -278,13 +278,13 @@ def normalize_columns(m, return_norms=False, norm_ord=None): Only returned when `return_norms=True`, a 1-dimensional array of the pre-normalization norm of each column. 
""" - norms = column_norms(m, norm_ord) + norms = column_norms(m, ord) norms[norms == 0.0] = 1.0 # avoid division of zero-column by zero normalized_m = scale_columns(m, 1 / norms) return (normalized_m, norms) if return_norms else normalized_m -def column_norms(m, norm_ord=None): +def column_norms(m, ord=None): """ Compute the norms of the columns of a matrix. @@ -304,14 +304,14 @@ def column_norms(m, norm_ord=None): A 1-dimensional array of the column norms (length is number of columns of `m`). """ if _sps.issparse(m): - ord_list = norm_ord if isinstance(norm_ord, (list, _np.ndarray)) else [norm_ord] * m.shape[1] + ord_list = ord if isinstance(ord, (list, _np.ndarray)) else [ord] * m.shape[1] assert(len(ord_list) == m.shape[1]) norms = _np.array([_np.linalg.norm(m[:, j].toarray(), ord=o) for j, o in enumerate(ord_list)]) - elif isinstance(norm_ord, (list, _np.ndarray)): - assert(len(norm_ord) == m.shape[1]) - norms = _np.array([_np.linalg.norm(m[:, j], ord=o) for j, o in enumerate(norm_ord)]) + elif isinstance(ord, (list, _np.ndarray)): + assert(len(ord) == m.shape[1]) + norms = _np.array([_np.linalg.norm(m[:, j], ord=o) for j, o in enumerate(ord)]) else: - norms = _np.linalg.norm(m, axis=0, ord=norm_ord) + norms = _np.linalg.norm(m, axis=0, ord=ord) return norms From e7b99e9b00203e90a755bd3d4d512256afce5417 Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Thu, 23 May 2024 10:28:37 -0400 Subject: [PATCH 30/41] remove change that wasnt strictly in-scope for the PR --- pygsti/extras/interpygate/process_tomography.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pygsti/extras/interpygate/process_tomography.py b/pygsti/extras/interpygate/process_tomography.py index 9d775f00c..9ce91cdbd 100644 --- a/pygsti/extras/interpygate/process_tomography.py +++ b/pygsti/extras/interpygate/process_tomography.py @@ -34,7 +34,7 @@ def vec(matrix): """ matrix = _np.array(matrix) if matrix.shape == (len(matrix), len(matrix)): - return matrix.reshape((-1, 1), order='F') + return _np.array([_np.concatenate(_np.array(matrix).T)]).T else: raise ValueError('The input matrix must be square.') @@ -55,9 +55,9 @@ def unvec(vectorized): """ vectorized = _np.array(vectorized) - dim = int(_np.sqrt(max(vectorized.shape))) - if len(vectorized) == dim ** 2: - return vectorized.reshape((dim, dim), order='F') + length = int(_np.sqrt(max(vectorized.shape))) + if len(vectorized) == length ** 2: + return _np.reshape(vectorized, [length, length]).T else: msg = 'The input vector length must be a perfect square, but this input has length %d.' % len(vectorized) raise ValueError(msg) From b0b6d7db164ccf1785bfbc718ad89a34b814dcf6 Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Thu, 23 May 2024 10:36:15 -0400 Subject: [PATCH 31/41] remove changes that werent strictly necessary --- pygsti/tools/matrixtools.py | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/pygsti/tools/matrixtools.py b/pygsti/tools/matrixtools.py index 60a10b0b3..9254b3cba 100644 --- a/pygsti/tools/matrixtools.py +++ b/pygsti/tools/matrixtools.py @@ -148,7 +148,8 @@ def is_valid_density_mx(mx, tol=1e-9): bool True if mx is a valid density matrix, otherwise False. """ - return abs(_np.trace(mx) - 1.0) < tol and is_hermitian(mx, tol) and is_pos_def(mx, tol) + # is_pos_def includes a check that the matrix is Hermitian. 
+ return abs(_np.trace(mx) - 1.0) < tol and is_pos_def(mx, tol) def nullspace(m, tol=1e-7): @@ -656,7 +657,6 @@ def mx_to_string_complex(m, real_width=9, im_width=9, prec=4): return s -#TODO: revert changes in the function below. def unitary_superoperator_matrix_log(m, mx_basis): """ Construct the logarithm of superoperator matrix `m`. @@ -686,16 +686,11 @@ def unitary_superoperator_matrix_log(m, mx_basis): from . import lindbladtools as _lt # (would create circular imports if at top) from . import optools as _ot # (would create circular imports if at top) - # Riley question: what assumptions do we have for the input m? The call to eigvals - # below is intended for fully-general matrices. I imagine we (typically) have structure - # that makes it preferable to call some other function (li) M_std = change_basis(m, mx_basis, "std") evals = _np.linalg.eigvals(M_std) - assert(_np.allclose(_np.abs(evals), 1.0)) - # ^ simple but technically incomplete check for a unitary superop - # (e.g. could be anti-unitary: diag(1, -1, -1, -1)) - - # ^ Riley question: + assert(_np.allclose(_np.abs(evals), 1.0)) # simple but technically incomplete check for a unitary superop + # (e.g. could be anti-unitary: diag(1, -1, -1, -1)) + U = _ot.std_process_mx_to_unitary(M_std) H = _spl.logm(U) / -1j # U = exp(-iH) logM_std = _lt.create_elementary_errorgen('H', H) # rho --> -i[H, rho] From ec12bdb2eaa05a61b7b40710250caba21ff444c1 Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Thu, 23 May 2024 14:12:03 -0400 Subject: [PATCH 32/41] tests pass --- pygsti/tools/matrixtools.py | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/pygsti/tools/matrixtools.py b/pygsti/tools/matrixtools.py index 9254b3cba..6ce5ade07 100644 --- a/pygsti/tools/matrixtools.py +++ b/pygsti/tools/matrixtools.py @@ -206,7 +206,6 @@ def nullspace_qr(m, tol=1e-7): return q[:, rank:] -#TODO: remove the orthogonalize argument (requires changing functions that call this one) def nice_nullspace(m, tol=1e-7, orthogonalize=False): """ Computes the nullspace of a matrix, and tries to return a "nice" basis for it. @@ -229,21 +228,19 @@ def nice_nullspace(m, tol=1e-7, orthogonalize=False): ------- An matrix of shape (M,K) whose columns contain nullspace basis vectors. 
""" - - # - # nullsp = nullspace(m, tol) - # dim_ker = nullsp.shape[1] - # _, _, p = _spl.qr(nullsp.T.conj(), mode='raw', pivoting=True) - # ret = nullsp @ (nullsp.T[:, p[dim_ker]]).conj() - # - ## ^ Equivalent to, but faster than the following - ## - ## nullsp_projector = nullsp @ nullsp.T.conj() - ## ret = nullsp_projector[:, p[:dim_ker]] - ## - # - - ret = nullspace(m, tol) + nullsp = nullspace(m, tol) + dim_ker = nullsp.shape[1] + if dim_ker == 0: + return nullsp # empty 0-by-N array + _, _, p = _spl.qr(nullsp.T.conj(), mode='raw', pivoting=True) + ret = nullsp @ (nullsp.T[:, p[:dim_ker]]).conj() + # ^ That's equivalent to, but faster than: + # nullsp_projector = nullsp @ nullsp.T.conj() + # _, _, p = _spl.qr(nullsp_projector mode='raw', pivoting=True) + # ret = nullsp_projector[:, p[:dim_ker]] + + if orthogonalize: + ret, _ = _spl.qr(ret, mode='economic') for j in range(ret.shape[1]): # normalize columns so largest element is +1.0 imax = _np.argmax(_np.abs(ret[:, j])) if abs(ret[imax, j]) > 1e-6: From a156511a80bec4bf16b88b95b3bd7eb593105039 Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Thu, 23 May 2024 14:15:26 -0400 Subject: [PATCH 33/41] remove is_normal function --- pygsti/tools/matrixtools.py | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/pygsti/tools/matrixtools.py b/pygsti/tools/matrixtools.py index 6ce5ade07..c3c688ff9 100644 --- a/pygsti/tools/matrixtools.py +++ b/pygsti/tools/matrixtools.py @@ -64,20 +64,6 @@ def gram_matrix(m, adjoint=False): return out -def is_normal(m, tol=1e-9): - """ - Test whether m is a normal operator, in the sense that it commutes with its adjoint. - """ - if m.shape[0] != m.shape[1]: - return False - prefix_char, _, _ = _spl.blas.find_best_blas_type(dtype=m.dtype) - herk = BLAS_FUNCS["herk"][prefix_char] - trans = 2 if _np.iscomplexobj(m) else 1 - mdagm = herk( 1.0, m, trans=trans ) - mmdag = herk( -1.0, m, trans=0, c=mdagm, overwrite_c=True ) - return _np.all(_np.abs(mmdag) <= tol) - - def is_hermitian(mx, tol=1e-9): """ Test whether mx is a hermitian matrix. From 362788f022dc0c66f0df0dc341aef16d41813660 Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Thu, 23 May 2024 16:35:02 -0400 Subject: [PATCH 34/41] add a comment and remove unused imports --- pygsti/tools/matrixtools.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pygsti/tools/matrixtools.py b/pygsti/tools/matrixtools.py index c3c688ff9..a83f940ba 100644 --- a/pygsti/tools/matrixtools.py +++ b/pygsti/tools/matrixtools.py @@ -417,6 +417,8 @@ def independent_columns(m, initial_independent_cols=None, tol=1e-7): if initial_independent_cols is None: proj_m = m.copy() else: + # We assume initial_independent_cols is full column-rank. + # This lets us use unpivoted QR instead of pivoted QR or SVD. 
assert initial_independent_cols.shape[0] == m.shape[0] q = _spl.qr(initial_independent_cols, mode='econ')[0] # proj_m = (I - qq')m From a67f17f743c88fc9913c49a41b4899ddb89c4951 Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Tue, 24 Sep 2024 12:38:40 -0400 Subject: [PATCH 35/41] interpygate helpers --- pygsti/extras/interpygate/__init__.py | 13 +++++-- .../extras/interpygate/process_tomography.py | 39 ++++++++++++++++--- test/test_packages/extras/test_interpygate.py | 6 +-- 3 files changed, 45 insertions(+), 13 deletions(-) diff --git a/pygsti/extras/interpygate/__init__.py b/pygsti/extras/interpygate/__init__.py index f126dee97..1155ee3f1 100644 --- a/pygsti/extras/interpygate/__init__.py +++ b/pygsti/extras/interpygate/__init__.py @@ -11,9 +11,14 @@ from .core import PhysicalProcess, InterpolatedDenseOp, InterpolatedOpFactory from .process_tomography import vec, unvec, run_process_tomography -# Note from Riley on May 22, 2024: +# Note from Riley on September, 2024: # -# I wanted to remove the implementations of vec and unvec and just in-line equivalent -# code in the few places they were used. However, the fact that they're included in this -# __init__.py file suggests that they might be used outside of pyGSTi itself. +# vec is deprecated, and shouldn't be called anywhere in the codebase. +# +# unvec is deprecated and replaced with unvec_square; the latter function +# isn't imported here because we don't want people to access it just from +# the pygsti.extras.interpygate namespace. +# +# Ideally we'd remove vec and unvec from the pygsti.extras.interpygate namespace +# and only have them available in pygsti.extras.interpygate.process_tomography. # diff --git a/pygsti/extras/interpygate/process_tomography.py b/pygsti/extras/interpygate/process_tomography.py index 9ce91cdbd..42908777e 100644 --- a/pygsti/extras/interpygate/process_tomography.py +++ b/pygsti/extras/interpygate/process_tomography.py @@ -34,7 +34,7 @@ def vec(matrix): """ matrix = _np.array(matrix) if matrix.shape == (len(matrix), len(matrix)): - return _np.array([_np.concatenate(_np.array(matrix).T)]).T + return matrix.reshape(shape=(matrix.size, 1), order='F') else: raise ValueError('The input matrix must be square.') @@ -54,10 +54,39 @@ def unvec(vectorized): ValueError: If the length of the input is not a perfect square """ - vectorized = _np.array(vectorized) - length = int(_np.sqrt(max(vectorized.shape))) - if len(vectorized) == length ** 2: - return _np.reshape(vectorized, [length, length]).T + return unvec_square(vectorized, order='F') + + +def unvec_square(vectorized, order): + """ + Takes a vector whose length is a perfect square, and returns a square matrix + representation by reading from the vectors entries to define the matrix in + column-major order (order='F') or row-major order (order='C'). + + Args: + vectorized: array-like, where np.array(vectorized).size is a perfect square. + order: 'F' or 'C' + + Returns: + numpy.ndarray: NxN dimensional array + + Raises: + ValueError: If the length of the input is not a perfect square. 
+ + """ + assert order == 'F' or order == 'C' + if not isinstance(vectorized, _np.ndarray): + vectorized = _np.array(vectorized) + + if vectorized.ndim == 2: + assert min(vectorized.shape) == 1 + vectorized = vectorized.ravel() + elif vectorized.ndim > 2: + raise ValueError('vectorized.ndim must be <= 2.') + + n = int(_np.sqrt(max(vectorized.shape))) + if len(vectorized) == n ** 2: + return vectorized.reshape(shape=(n, n), order=order) else: msg = 'The input vector length must be a perfect square, but this input has length %d.' % len(vectorized) raise ValueError(msg) diff --git a/test/test_packages/extras/test_interpygate.py b/test/test_packages/extras/test_interpygate.py index ea8ccfc83..97e76e936 100644 --- a/test/test_packages/extras/test_interpygate.py +++ b/test/test_packages/extras/test_interpygate.py @@ -51,8 +51,7 @@ def advance(self, state, v, t): L = dephasing * self.dephasing_generator + decoherence * self.decoherence_generator process = change_basis(_expm((H + L) * t), 'pp', 'col') - vec_state = _np.outer(state, state.conj()).ravel(order='F') - state = unvec_square(_np.dot(process, vec_state), 'F') + state = unvec_square(_np.dot(process, _np.outer(state, state.conj()).ravel(order='F')), 'F') return state def create_process_matrix(self, v, comm=None): @@ -103,8 +102,7 @@ def advance(self, state, v, times): L = dephasing * self.dephasing_generator + decoherence * self.decoherence_generator processes = [change_basis(_expm((H + L) * t), 'pp', 'col') for t in times] - vec_state = _np.outer(state, state.conj()).ravel(order='F') - states = [unvec_square(_np.dot(process, vec_state),'F') for process in processes] + states = [unvec_square(_np.dot(process, _np.outer(state, state.conj())).ravel(order='F'),'F') for process in processes] return states From e79b2af67d8c8a1096892c120cc83e3710352030 Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Tue, 24 Sep 2024 12:59:51 -0400 Subject: [PATCH 36/41] remove safe_dot --- pygsti/tools/matrixtools.py | 29 ----------------------------- 1 file changed, 29 deletions(-) diff --git a/pygsti/tools/matrixtools.py b/pygsti/tools/matrixtools.py index a83f940ba..94940c45c 100644 --- a/pygsti/tools/matrixtools.py +++ b/pygsti/tools/matrixtools.py @@ -1424,35 +1424,6 @@ def _findx(a, inds, always_copy=False): return a_inds -# TODO: reevaluate the need for this function. It seems like we could just in-line @ -# and let operator overloading and implementations of __matmul__ and __rmatmul__ -# handle it. -def safe_dot(a, b): - """ - Performs dot(a,b) correctly when neither, either, or both arguments are sparse matrices. - - Parameters - ---------- - a : numpy.ndarray or scipy.sparse matrix. - First matrix. - - b : numpy.ndarray or scipy.sparse matrix. - Second matrix. - - Returns - ------- - numpy.ndarray or scipy.sparse matrix - """ - if _sps.issparse(a): - return a.dot(b) # sparseMx.dot works for both sparse and dense args - elif _sps.issparse(b): - # to return a sparse mx even when a is dense (asymmetric behavior): - # --> return _sps.csr_matrix(a).dot(b) # numpyMx.dot can't handle sparse argument - return _np.dot(a, b.toarray()) - else: - return _np.dot(a, b) - - def safe_norm(a, part=None): """ Get the frobenius norm of a matrix or vector, `a`, when it is either a dense array or a sparse matrix. 
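
The removed `safe_dot` helper only dispatched between dense and sparse products;
as the removed TODO note observes, SciPy's sparse matrix types implement
`__matmul__`/`__rmatmul__`, so the plain `@` operator covers the same cases. A
small self-contained illustration of that point (independent of pyGSTi):

    import numpy as np
    import scipy.sparse as sps

    a_sparse = sps.csr_matrix(np.eye(3))
    b_dense = np.arange(9.0).reshape(3, 3)

    c1 = a_sparse @ b_dense   # sparse times dense, handled by scipy.sparse
    c2 = b_dense @ b_dense    # dense times dense, handled by numpy
    print(np.allclose(c1, b_dense), c2.shape)
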
From 2cdfe8736830b617387d121a1780fe7f79a6c6e1 Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Tue, 24 Sep 2024 13:43:07 -0400 Subject: [PATCH 37/41] rebase step 1 --- pygsti/circuits/circuit.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pygsti/circuits/circuit.py b/pygsti/circuits/circuit.py index 822fd0b65..778790405 100644 --- a/pygsti/circuits/circuit.py +++ b/pygsti/circuits/circuit.py @@ -3537,7 +3537,7 @@ def cnt(obj): # obj is either a simple label or a list return sum([cnt(sub) for sub in obj]) return sum([cnt(layer_lbl) for layer_lbl in self._labels]) - + def _togrid(self, identity_name): """ return a list-of-lists rep? """ d = self.num_layers From 024d55380a12b823872da1b941df05c2ab13f61a Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Mon, 30 Sep 2024 11:04:30 -0400 Subject: [PATCH 38/41] found lots of other commented-out function definitions --- pygsti/algorithms/mirroring.py | 388 -------- pygsti/algorithms/randomcircuit.py | 1131 ---------------------- pygsti/algorithms/rbfit.py | 234 ----- pygsti/baseobjs/errorgenbasis.py | 122 --- pygsti/baseobjs/errorgenspace.py | 10 - pygsti/baseobjs/polynomial.py | 568 ----------- pygsti/data/datacomparator.py | 5 - pygsti/data/hypothesistest.py | 7 - pygsti/evotypes/chp/statereps.py | 22 - pygsti/evotypes/statevec_slow/opreps.py | 5 - pygsti/extras/rb/dataset.py | 31 - pygsti/extras/rb/io.py | 76 -- pygsti/modelmembers/povms/denseeffect.py | 142 --- pygsti/modelpacks/stdtarget.py | 121 --- pygsti/protocols/vb.py | 188 ---- pygsti/report/workspaceplots.py | 414 -------- pygsti/tools/pdftools.py | 25 - pygsti/tools/rbtheory.py | 249 ----- 18 files changed, 3738 deletions(-) delete mode 100644 pygsti/modelmembers/povms/denseeffect.py diff --git a/pygsti/algorithms/mirroring.py b/pygsti/algorithms/mirroring.py index 74dc2bcec..a6b4ee49d 100644 --- a/pygsti/algorithms/mirroring.py +++ b/pygsti/algorithms/mirroring.py @@ -21,92 +21,6 @@ from . import randomcircuit as _rc -# ### TODO: THIS IS TIMS OLD CODE WHICH SHOULD PERHAPS ALSO BE AN OPTION IN THE `CREATE_MIRROR_CIRCUIT` FUNCTION -# def create_mirror_circuit(circ, pspec, circtype='Clifford+Gzr', pauli_labels=None, pluspi_prob=0.): -# """ -# ***************************************************************** -# Function currently has the following limitations that need fixing: - -# - A layer contains only Clifford or Gzr gates on ALL the qubits. -# - all of the Clifford gates are self inverse -# - The qubits are labelled "Q0" through "Qn-1" -- THIS SHOULD NOW BE FIXED! -# - Pauli's are labelled by "Gi", "Gxpi", "Gypi" and "Gzpi". -# - There's no option for randomized prep/meas -# - There's no option for randomly adding +/-pi to the Z rotation angles. -# - There's no option for adding "barriers" -# - There's no test that the 'Gzr' gate has the "correct" convention for a rotation angle -# (a rotation by pi must be a Z gate) or that it's a rotation around Z. 
-# ***************************************************************** -# """ -# assert(circtype == 'Clifford+Gzr' or circtype == 'Clifford') -# n = circ.width -# d = circ.depth -# if pauli_labels is None: pauli_labels = ['Gi', 'Gxpi', 'Gypi', 'Gzpi'] -# qubits = circ.line_labels -# identity = _np.identity(2 * n, _np.int64) -# zrotname = 'Gzr' -# # qubit_labels = ['G{}'.format(i) for i in range(n)] - -# _, gate_inverse = pspec.compute_one_qubit_gate_relations() -# gate_inverse.update(pspec.compute_multiqubit_inversion_relations()) # add multiQ inverses - -# quasi_inverse_circ = [] -# central_pauli_circ = _cir.Circuit([[_lbl.Label(pauli_labels[_np.random.randint(0, 4)], q) for q in qubits]]) -# #telescoping_pauli = central_pauli_layer.copy() -# # The telescoping Pauli in the symplectic rep. -# telp_s, telp_p = _symp.symplectic_rep_of_clifford_circuit(central_pauli_circ, pspec=pspec) -# assert(_np.sum(_np.abs(telp_s - identity)) <= 1e-8) # Check that it's a Pauli. - -# for d_ind in range(d): -# layer = circ.layer(d - d_ind - 1) -# if layer[0].name == zrotname: -# quasi_inverse_layer = [] -# for gate in layer: - -# q_int = qubits.index(gate.qubits[0]) -# angle = float(gate.args[0]) - -# if telp_p[n + q_int] == 0: rotation_sign = -1. # If the Pauli is Z or I. -# else: rotation_sign = +1 # If the Pauli is X or Y. - -# # Sets the quasi inversion angle to + or - the original angle, depending on the Paul -# quasi_inverse_angle = rotation_sign * angle -# # Decides whether to add with to add +/- pi to the rotation angle. -# if _np.random.binomial(1, pluspi_prob) == 1: -# quasi_inverse_angle += _np.pi * (-1)**_np.random.binomial(1, 0.5) -# quasi_inverse_angle = _comp.mod_2pi(quasi_inverse_angle) -# # Updates the telescoping Pauli (in the symplectic rep_, to include this added pi-rotation, -# # as we need to include it as we keep collapsing the circuit down. -# telp_p[q_int] = (telp_p[q_int] + 2) % 4 -# # Constructs the quasi-inverse gate. -# quasi_inverse_gate = _lbl.Label(zrotname, gate.qubits, args=(str(quasi_inverse_angle),)) -# quasi_inverse_layer.append(quasi_inverse_gate) - -# # We don't have to update the telescoping Pauli as it's unchanged, but when we update -# # this it'll need to change. -# #telp_p = telp_p - -# else: -# quasi_inverse_layer = [_lbl.Label(gate_inverse[gate.name], gate.qubits) for gate in layer] -# telp_layer = _symp.find_pauli_layer(telp_p, pauli_labels, qubits) -# conjugation_circ = _cir.Circuit([layer, telp_layer, quasi_inverse_layer]) -# # We calculate what the new telescoping Pauli is, in the symplectic rep. -# telp_s, telp_p = _symp.symplectic_rep_of_clifford_circuit(conjugation_circ, pspec=pspec) - -# # Check that the layer -- pauli -- quasi-inverse circuit implements a Pauli. -# assert(_np.sum(_np.abs(telp_s - identity)) <= 1e-10) -# # Add the quasi inverse layer that we've constructed to the end of the quasi inverse circuit. -# quasi_inverse_circ.append(quasi_inverse_layer) - -# # now that we've completed the quasi inverse circuit we convert it to a Circuit object -# quasi_inverse_circ = _cir.Circuit(quasi_inverse_circ) - -# # Calculate the bit string that this mirror circuit should output, from the final telescoped Pauli. 
-# target_bitstring = ''.join(['1' if p == 2 else '0' for p in telp_p[n:]]) -# mirror_circuit = circ + central_pauli_circ + quasi_inverse_circ - -# return mirror_circuit, target_bitstring - def create_mirror_circuit(circ, pspec, circ_type='clifford+zxzxz'): """ @@ -313,305 +227,3 @@ def compute_gate_inverse(gate_label): mirror_circuit = _cir.Circuit(mc, line_labels=circ.line_labels) return mirror_circuit, target_bitstring - - -# #generate mirror circuits with pauli frame randomization. no random +pi needed -# #as we construct the quasi-inverse, we generate random pauli layers, and compile them into the unitaries -# #we'll need to recompute the angles needed for the z rotations - -# def create_nc_mirror_circuit(circ, pspec, circtype='Clifford+Gzr'): - -# assert(circtype == 'Clifford+Gzr' or circtype == 'Clifford') -# n = circ.width -# d = circ.depth -# pauli_labels = ['I', 'X', 'Y', 'Z'] -# qubits = circ.line_labels -# identity = _np.identity(2 * n, _np.int64) -# zrotname = 'Gzr' -# # qubit_labels = ['G{}'.format(i) for i in range(n)] - -# _, gate_inverse = pspec.compute_one_qubit_gate_relations() -# gate_inverse.update(pspec.compute_multiqubit_inversion_relations()) # add multiQ inverses -# #for gname in pspec.gate_names: -# # assert(gname in gate_inverse), \ -# # "%s gate does not have an inverse in the gate-set! MRB is not possible!" % gname - -# quasi_inverse_circ = [] - -# Xpi2layer = [_lbl.Label('Gc16', qubits[t]) for t in range(n)] -# c = circ.copy(editable=True) - -# #build the inverse -# d_ind = 0 -# while d_ind 0 the number of circuits generated so far is shown. - -# Returns -# ------- -# dict -# A dictionary containing the generated random circuits, the error-free outputs of the circuit, -# and the specification used to generate the circuits. The keys are: - -# - 'circuits'. A dictionary of the sampled circuits. The circuit with key(l,k) is the kth circuit -# at length l. - -# - 'probs'. A dictionary of the error-free *marginalized* probabilities for the "1" outcome of -# a computational basis measurement at the end of each circuit, with the standard input state. -# The ith element of this tuple corresponds to this probability for the qubit on the ith wire of -# the output circuit. - -# - 'qubitordering'. The ordering of the qubits in the 'target' tuples. - -# - 'spec'. A dictionary containing all of the parameters handed to this function, except `pspec`. -# This then specifies how the circuits where generated. -# """ -# experiment_dict = {} -# experiment_dict['spec'] = {} -# experiment_dict['spec']['depths'] = depths -# experiment_dict['spec']['circuits_per_length'] = circuits_per_length -# experiment_dict['spec']['sampler'] = sampler -# experiment_dict['spec']['samplerargs'] = samplerargs -# experiment_dict['spec']['addlocal'] = addlocal -# experiment_dict['spec']['lsargs'] = lsargs -# experiment_dict['spec']['descriptor'] = descriptor -# experiment_dict['spec']['createdby'] = 'extras.rb.sample.simultaneous_random_circuits_experiment' - -# if isinstance(structure, str): -# assert(structure == '1Q'), "The only default `structure` option is the string '1Q'" -# structure = tuple([(q,) for q in pspec.qubit_labels]) -# else: -# assert(isinstance(structure, list) or isinstance(structure, tuple)), \ -# "If not a string, `structure` must be a list or tuple." -# qubits_used = [] -# for qubit_labels in structure: -# assert(isinstance(qubit_labels, list) or isinstance( -# qubit_labels, tuple)), "SubsetQs must be a list or a tuple!" 
-# qubits_used = qubits_used + list(qubit_labels) -# assert(len(set(qubits_used)) == len(qubits_used)), \ -# "The qubits in the tuples/lists of `structure must all be unique!" - -# assert(set(qubits_used).issubset(set(pspec.qubit_labels))), \ -# "The qubits to benchmark must all be in the QubitProcessorSpec `pspec`!" - -# experiment_dict['spec']['structure'] = structure -# experiment_dict['circuits'] = {} -# experiment_dict['probs'] = {} -# experiment_dict['settings'] = {} - -# for lnum, l in enumerate(depths): -# if verbosity > 0: -# print('- Sampling {} circuits at length {} ({} of {} depths)'.format(circuits_per_length, l, -# lnum + 1, len(depths))) -# print(' - Number of circuits sampled = ', end='') -# for j in range(circuits_per_length): -# circuit, idealout = sample_simultaneous_random_circuit(pspec, l, structure=structure, sampler=sampler, -# samplerargs=samplerargs, addlocal=addlocal, -# lsargs=lsargs) - -# if (not set_isolated) and (not setcomplement_isolated): -# experiment_dict['circuits'][l, j] = circuit -# experiment_dict['probs'][l, j] = idealout -# experiment_dict['settings'][l, j] = { -# s: len(depths) + lnum * circuits_per_length + j for s in tuple(structure)} -# else: -# experiment_dict['circuits'][l, j] = {} -# experiment_dict['probs'][l, j] = {} -# experiment_dict['settings'][l, j] = {} -# experiment_dict['circuits'][l, j][tuple(structure)] = circuit -# experiment_dict['probs'][l, j][tuple(structure)] = idealout -# experiment_dict['settings'][l, j][tuple(structure)] = _get_setting(l, j, structure, depths, -# circuits_per_length, structure) -# if set_isolated: -# for subset_ind, subset in enumerate(structure): -# subset_circuit = circuit.copy(editable=True) -# #print(subset) -# for q in circuit.line_labels: -# if q not in subset: -# #print(subset_circuit, q) -# subset_circuit.replace_with_idling_line_inplace(q) -# subset_circuit.done_editing() -# experiment_dict['circuits'][l, j][(tuple(subset),)] = subset_circuit -# experiment_dict['probs'][l, j][(tuple(subset),)] = idealout[subset_ind] -# # setting = {} -# # for s in structure: -# # if s in subset: -# # setting[s] = len(depths) + lnum*circuits_per_length + j -# # else: -# # setting[s] = lnum -# experiment_dict['settings'][l, j][(tuple(subset),)] = _get_setting(l, j, (tuple(subset),), depths, -# circuits_per_length, structure) -# # print(subset) -# # print(_get_setting(l, j, subset, depths, circuits_per_length, structure)) - -# if setcomplement_isolated: -# for subset_ind, subset in enumerate(structure): -# subsetcomplement_circuit = circuit.copy(editable=True) -# for q in circuit.line_labels: -# if q in subset: -# subsetcomplement_circuit.replace_with_idling_line_inplace(q) -# subsetcomplement_circuit.done_editing() -# subsetcomplement = list(_copy.copy(structure)) -# subsetcomplement_idealout = list(_copy.copy(idealout)) -# del subsetcomplement[subset_ind] -# del subsetcomplement_idealout[subset_ind] -# subsetcomplement = tuple(subsetcomplement) -# subsetcomplement_idealout = tuple(subsetcomplement_idealout) -# experiment_dict['circuits'][l, j][subsetcomplement] = subsetcomplement_circuit -# experiment_dict['probs'][l, j][subsetcomplement] = subsetcomplement_idealout - -# # for s in structure: -# # if s in subsetcomplement: -# # setting[s] = len(depths) + lnum*circuits_per_length + j -# # else: -# # setting[s] = lnum -# experiment_dict['settings'][l, j][subsetcomplement] = _get_setting(l, j, subsetcomplement, depths, -# circuits_per_length, structure) - -# if verbosity > 0: print(j + 1, end=',') -# if verbosity > 0: 
print('') - -# return experiment_dict - - -# def create_exhaustive_independent_random_circuits_experiment(pspec, allowed_depths, circuits_per_subset, -# structure='1Q', -# sampler='Qelimination', samplerargs=[], descriptor='', -# verbosity=1, seed=None): -# """ -# Todo - -# Parameters -# ---------- -# pspec : QubitProcessorSpec -# The QubitProcessorSpec for the device that the circuit is being sampled for, which defines the -# "native" gate-set and the connectivity of the device. The returned circuit will be over -# the gates in `pspec`, and will respect the connectivity encoded by `pspec`. Note that `pspec` -# is always handed to the sampler, as the first argument of the sampler function (this is only -# of importance when not using an in-built sampler). - -# allowed_depths : -# - -# circuits_per_subset : -# - -# structure : str or tuple. -# Defines the "structure" of the simultaneous circuit. TODO : more details. - -# sampler : str or function, optional -# If a string, this should be one of: {'pairingQs', 'Qelimination', 'co2Qgates', 'local'}. -# Except for 'local', this corresponds to sampling layers according to the sampling function -# in rb.sampler named circuit_layer_by* (with * replaced by 'sampler'). For 'local', this -# corresponds to sampling according to rb.sampler.circuit_layer_of_oneQgates. -# If `sampler` is a function, it should be a function that takes as the first argument a -# QubitProcessorSpec, and returns a random circuit layer as a list of gate Label objects. Note that -# the default 'Qelimination' is not necessarily the most useful in-built sampler, but it is the -# only sampler that requires no parameters beyond the QubitProcessorSpec *and* works for arbitrary -# connectivity devices. See the docstrings for each of these samplers for more information. - -# samplerargs : list, optional -# A list of arguments that are handed to the sampler function, specified by `sampler`. -# The first argument handed to the sampler is `pspec`, the second argument is `qubit_labels`, -# and `samplerargs` lists the remaining arguments handed to the sampler. This is not -# optional for some choices of `sampler`. - -# descriptor : str, optional -# A description of the experiment being generated. Stored in the output dictionary. - -# verbosity : int, optional -# How much output to sent to stdout. - -# seed : int, optional -# Seed for RNG - -# Returns -# ------- -# dict -# """ -# experiment_dict = {} -# experiment_dict['spec'] = {} -# experiment_dict['spec']['allowed_depths'] = allowed_depths -# experiment_dict['spec']['circuits_per_subset'] = circuits_per_subset -# experiment_dict['spec']['sampler'] = sampler -# experiment_dict['spec']['samplerargs'] = samplerargs -# experiment_dict['spec']['descriptor'] = descriptor - -# if isinstance(structure, str): -# assert(structure == '1Q'), "The only default `structure` option is the string '1Q'" -# structure = tuple([(q,) for q in pspec.qubit_labels]) -# else: -# assert(isinstance(structure, list) or isinstance(structure, tuple)), \ -# "If not a string, `structure` must be a list or tuple." -# qubits_used = [] -# for qubit_labels in structure: -# assert(isinstance(qubit_labels, list) or isinstance( -# qubit_labels, tuple)), "SubsetQs must be a list or a tuple!" -# qubits_used = qubits_used + list(qubit_labels) -# assert(len(set(qubits_used)) == len(qubits_used)), \ -# "The qubits in the tuples/lists of `structure must all be unique!" 
- -# assert(set(qubits_used).issubset(set(pspec.qubit_labels))), \ -# "The qubits to benchmark must all be in the QubitProcessorSpec `pspec`!" - -# rand_state = _np.random.RandomState(seed) # OK if seed is None - -# experiment_dict['spec']['structure'] = structure -# experiment_dict['circuits'] = {} -# experiment_dict['probs'] = {} - -# if circuits_per_subset**len(structure) >> 10000: -# print("Warning: {} circuits are going to be generated by this function!".format( -# circuits_per_subset**len(structure))) - -# circuits = {} - -# for ssQs_ind, qubit_labels in enumerate(structure): -# circuits[qubit_labels] = [] -# for i in range(circuits_per_subset): -# l = allowed_depths[rand_state.randint(len(allowed_depths))] -# circuits[qubit_labels].append(create_random_circuit(pspec, l, qubit_labels=qubit_labels, -# sampler=sampler, samplerargs=samplerargs)) - -# experiment_dict['subset_circuits'] = circuits - -# parallel_circuits = {} -# it = [range(circuits_per_subset) for i in range(len(structure))] -# for setting_comb in _itertools.product(*it): -# pcircuit = _cir.Circuit(num_lines=0, editable=True) -# for ssQs_ind, qubit_labels in enumerate(structure): -# pcircuit.tensor_circuit_inplace(circuits[qubit_labels][setting_comb[ssQs_ind]]) -# pcircuit.done_editing() # TIM: is this indented properly? -# parallel_circuits[setting_comb] = pcircuit - -# experiment_dict['circuits'] = parallel_circuits - -# return experiment_dict - def create_direct_rb_circuit(pspec, clifford_compilations, length, qubit_labels=None, sampler='Qelimination', samplerargs=None, addlocal=False, lsargs=None, randomizeout=True, cliffordtwirl=True, @@ -1578,564 +1005,6 @@ def create_direct_rb_circuit(pspec, clifford_compilations, length, qubit_labels= return outcircuit, idealout -#### Commented out as all of this functionality should be reproducable using simulataneous experiment designs applied -#### to DirectRB experiment designs. -# def sample_simultaneous_direct_rb_circuit(pspec, clifford_compilations, length, structure='1Q', -# sampler='Qelimination', -# samplerargs=[], addlocal=False, lsargs=[], randomizeout=True, -# cliffordtwirl=True, conditionaltwirl=True, citerations=20, compilerargs=[], -# partitioned=False, seed=1234): -# """ -# Generates a simultaneous "direct randomized benchmarking" (DRB) circuit. - -# DRB is the protocol introduced in arXiv:1807.07975 (2018). An n-qubit DRB circuit consists of -# (1) a circuit the prepares a uniformly random stabilizer state; (2) a length-l circuit -# (specified by `length`) consisting of circuit layers sampled according to some user-specified -# distribution (specified by `sampler`), (3) a circuit that maps the output of the preceeding -# circuit to a computational basis state. See arXiv:1807.07975 (2018) for further details. Todo : -# what SDRB is. - -# Parameters -# ---------- -# pspec : QubitProcessorSpec -# The QubitProcessorSpec for the device that the circuit is being sampled for, which defines the -# "native" gate-set and the connectivity of the device. The returned DRB circuit will be over -# the gates in `pspec`, and will respect the connectivity encoded by `pspec`. Note that `pspec` -# is always handed to the sampler, as the first argument of the sampler function (this is only -# of importance when not using an in-built sampler for the "core" of the DRB circuit). Unless -# `qubit_labels` is not None, the circuit is sampled over all the qubits in `pspec`. 
- -# clifford_compilations : dict -# A dictionary with the potential keys `'absolute'` and `'paulieq'` and corresponding -# :class:`CompilationRules` values. These compilation rules specify how to compile the -# "native" gates of `pspec` into Clifford gates. - -# length : int -# The "direct RB length" of the circuit, which is closely related to the circuit depth. It -# must be an integer >= 0. Unless `addlocal` is True, it is the depth of the "core" random -# circuit, sampled according to `sampler`, specified in step (2) above. If `addlocal` is True, -# each layer in the "core" circuit sampled according to "sampler` is followed by a layer of -# 1-qubit gates, with sampling specified by `lsargs` (and the first layer is proceeded by a -# layer of 1-qubit gates), and so the circuit of step (2) is length 2*`length` + 1. - -# structure : str or tuple, optional -# todo. - -# sampler : str or function, optional -# If a string, this should be one of: {'pairingQs', 'Qelimination', 'co2Qgates', 'local'}. -# Except for 'local', this corresponds to sampling layers according to the sampling function -# in rb.sampler named circuit_layer_by* (with * replaced by 'sampler'). For 'local', this -# corresponds to sampling according to rb.sampler.circuit_layer_of_oneQgates [which is not -# a valid form of sampling for n-qubit DRB, but is not explicitly forbidden in this function]. -# If `sampler` is a function, it should be a function that takes as the first argument a -# QubitProcessorSpec, and returns a random circuit layer as a list of gate Label objects. Note that -# the default 'Qelimination' is not necessarily the most useful in-built sampler, but it is -# the only sampler that requires no parameters beyond the QubitProcessorSpec *and* works for arbitrary -# connectivity devices. See the docstrings for each of these samplers for more information. - -# samplerargs : list, optional -# A list of arguments that are handed to the sampler function, specified by `sampler`. -# The first argument handed to the sampler is `pspec`, the second argument is `qubit_labels`, -# and `samplerargs` lists the remaining arguments handed to the sampler. This is not -# optional for some choices of `sampler`. - -# addlocal : bool, optional -# Whether to follow each layer in the "core" circuit, sampled according to `sampler` with -# a layer of 1-qubit gates. - -# lsargs : list, optional -# Only used if addlocal is True. A list of optional arguments handed to the 1Q gate -# layer sampler circuit_layer_by_oneQgate(). Specifies how to sample 1Q-gate layers. - -# randomizeout : bool, optional -# If False, the ideal output of the circuit (the "success" or "survival" outcome) is the all-zeros -# bit string. If True, the ideal output of the circuit is randomized to a uniformly random bit-string. -# This setting is useful for, e.g., detecting leakage/loss/measurement-bias etc. - -# cliffordtwirl : bool, optional -# Wether to begin the circuit with a sequence that generates a random stabilizer state. For -# standard DRB this should be set to True. There are a variety of reasons why it is better -# to have this set to True. - -# conditionaltwirl : bool, optional -# DRB only requires that the initial/final sequences of step (1) and (3) create/measure -# a uniformly random / particular stabilizer state, rather than implement a particular unitary. -# step (1) and (3) can be achieved by implementing a uniformly random Clifford gate and the -# unique inversion Clifford, respectively. This is implemented if `conditionaltwirl` is False. 
-# However, steps (1) and (3) can be implemented much more efficiently than this: the sequences -# of (1) and (3) only need to map a particular input state to a particular output state, -# if `conditionaltwirl` is True this more efficient option is chosen -- this is option corresponds -# to "standard" DRB. (the term "conditional" refers to the fact that in this case we essentially -# implementing a particular Clifford conditional on a known input). - -# citerations : int, optional -# Some of the stabilizer state / Clifford compilation algorithms in pyGSTi (including the default -# algorithms) are randomized, and the lowest-cost circuit is chosen from all the circuit generated -# in the iterations of the algorithm. This is the number of iterations used. The time required to -# generate a DRB circuit is linear in `citerations`. Lower-depth / lower 2-qubit gate count -# compilations of steps (1) and (3) are important in order to successfully implement DRB on as many -# qubits as possible. - -# compilerargs : list, optional -# A list of arguments that are handed to the compile_stabilier_state/measurement()functions (or the -# compile_clifford() function if `conditionaltwirl `is False). This includes all the optional -# arguments of these functions *after* the `iterations` option (set by `citerations`). For most -# purposes the default options will be suitable (or at least near-optimal from the compilation methods -# in-built into pyGSTi). See the docstrings of these functions for more information. - -# partitioned : bool, optional -# If False, a single circuit is returned consisting of the full circuit. If True, three circuits -# are returned in a list consisting of: (1) the stabilizer-prep circuit, (2) the core random circuit, -# (3) the pre-measurement circuit. In that case the full circuit is obtained by appended (2) to (1) -# and then (3) to (1). - -# seed: int, optional -# Seed for RNG - -# Returns -# ------- -# Circuit or list of Circuits -# If partioned is False, a random DRB circuit sampled as specified. If partioned is True, a list of -# three circuits consisting of (1) the stabilizer-prep circuit, (2) the core random circuit, -# (3) the pre-measurement circuit. In that case the full circuit is obtained by appended (2) to (1) -# and then (3) to (1) [except in the case of cliffordtwirl=False, when it is a list of two circuits]. -# Tuple -# A length-n tuple of integers in [0,1], corresponding to the error-free outcome of the -# circuit. Always all zeros if `randomizeout` is False. The ith element of the tuple -# corresponds to the error-free outcome for the qubit labelled by: the ith element of -# `qubit_labels`, if `qubit_labels` is not None; the ith element of `pspec.qubit_labels`, otherwise. -# In both cases, the ith element of the tuple corresponds to the error-free outcome for the -# qubit on the ith wire of the output circuit. -# """ -# if isinstance(structure, str): -# assert(structure == '1Q'), "The only default `structure` option is the string '1Q'" -# structure = tuple([(q,) for q in pspec.qubit_labels]) -# n = pspec.num_qubits -# else: -# assert(isinstance(structure, list) or isinstance(structure, tuple) -# ), "If not a string, `structure` must be a list or tuple." -# qubits_used = [] -# for qubit_labels in structure: -# assert(isinstance(qubit_labels, list) or isinstance( -# qubit_labels, tuple)), "SubsetQs must be a list or a tuple!" 
-# qubits_used = qubits_used + list(qubit_labels) -# assert(len(set(qubits_used)) == len(qubits_used) -# ), "The qubits in the tuples/lists of `structure must all be unique!" - -# assert(set(qubits_used).issubset(set(pspec.qubit_labels)) -# ), "The qubits to benchmark must all be in the QubitProcessorSpec `pspec`!" -# n = len(qubits_used) - -# for qubit_labels in structure: -# subgraph = pspec.qubit_graph.subgraph(list(qubit_labels)) # or pspec.compute_clifford_2Q_connectivity? -# assert(subgraph.is_connected_graph()), "Each subset of qubits in `structure` must be connected!" - -# rand_state = _np.random.RandomState(seed) # OK if seed is None - -# # Creates a empty circuit over no wires -# circuit = _cir.Circuit(num_lines=0, editable=True) - -# s_rc_dict = {} -# p_rc_dict = {} -# circuit_dict = {} - -# for qubit_labels in structure: -# qubit_labels = tuple(qubit_labels) -# # Sample a random circuit of "native gates" over this set of qubits, with the -# # specified sampling. -# subset_circuit = create_random_circuit(pspec=pspec, length=length, qubit_labels=qubit_labels, sampler=sampler, -# samplerargs=samplerargs, addlocal=addlocal, lsargs=lsargs, -# rand_state=rand_state) -# circuit_dict[qubit_labels] = subset_circuit -# # find the symplectic matrix / phase vector this circuit implements. -# s_rc_dict[qubit_labels], p_rc_dict[qubit_labels] = _symp.symplectic_rep_of_clifford_circuit( -# subset_circuit, pspec=pspec) -# # Tensors this circuit with the current circuit -# circuit.tensor_circuit_inplace(subset_circuit) - -# # Creates empty circuits over no wires -# inversion_circuit = _cir.Circuit(num_lines=0, editable=True) -# if cliffordtwirl: -# initial_circuit = _cir.Circuit(num_lines=0, editable=True) - -# for qubit_labels in structure: -# qubit_labels = tuple(qubit_labels) -# subset_n = len(qubit_labels) -# # If we are clifford twirling, we do an initial random circuit that is either a uniformly random -# # cliffor or creates a uniformly random stabilizer state from the standard input. -# if cliffordtwirl: - -# # Sample a uniformly random Clifford. -# s_initial, p_initial = _symp.random_clifford(subset_n, rand_state=rand_state) -# # Find the composite action of this uniformly random clifford and the random circuit. -# s_composite, p_composite = _symp.compose_cliffords(s_initial, p_initial, s_rc_dict[qubit_labels], -# p_rc_dict[qubit_labels]) - -# # If conditionaltwirl we do a stabilizer prep (a conditional Clifford). -# if conditionaltwirl: -# subset_initial_circuit = _cmpl.compile_stabilizer_state(s_initial, p_initial, pspec, -# clifford_compilations.get('absolute', None), -# clifford_compilations.get('paulieq', None), -# qubit_labels, -# citerations, *compilerargs, -# rand_state=rand_state) -# # If not conditionaltwirl, we do a full random Clifford. -# else: -# subset_initial_circuit = _cmpl.compile_clifford(s_initial, p_initial, pspec, -# clifford_compilations.get('absolute', None), -# clifford_compilations.get('paulieq', None), -# qubit_labels, citerations, -# *compilerargs, rand_state=rand_state) - -# initial_circuit.tensor_circuit_inplace(subset_initial_circuit) - -# # If we are not Clifford twirling, we just copy the effect of the random circuit as the effect -# # of the "composite" prep + random circuit (as here the prep circuit is the null circuit). 
-# else: -# s_composite = _copy.deepcopy(s_rc_dict[qubit_labels]) -# p_composite = _copy.deepcopy(p_rc_dict[qubit_labels]) - -# if conditionaltwirl: -# # If we want to randomize the expected output then randomize the p vector, otherwise -# # it is left as p. Note that, unlike with compile_clifford, we don't invert (s,p) -# # before handing it to the stabilizer measurement function. -# if randomizeout: p_for_measurement = _symp.random_phase_vector(s_composite, subset_n, -# rand_state=rand_state) -# else: p_for_measurement = p_composite -# subset_inversion_circuit = _cmpl.compile_stabilizer_measurement( -# s_composite, p_for_measurement, pspec, -# clifford_compilations.get('absolute', None), -# clifford_compilations.get('paulieq', None), -# qubit_labels, citerations, *compilerargs, -# rand_state=rand_state) -# else: -# # Find the Clifford that inverts the circuit so far. We -# s_inverse, p_inverse = _symp.inverse_clifford(s_composite, p_composite) -# # If we want to randomize the expected output then randomize the p_inverse vector, otherwise -# # do not. -# if randomizeout: p_for_inversion = _symp.random_phase_vector(s_inverse, subset_n, rand_state=rand_state) -# else: p_for_inversion = p_inverse -# # Compile the Clifford. -# subset_inversion_circuit = _cmpl.compile_clifford(s_inverse, p_for_inversion, pspec, -# clifford_compilations.get('absolute', None), -# clifford_compilations.get('paulieq', None), -# qubit_labels, citerations, *compilerargs, -# rand_state=rand_state) - -# inversion_circuit.tensor_circuit_inplace(subset_inversion_circuit) - -# inversion_circuit.done_editing() - -# if cliffordtwirl: -# full_circuit = initial_circuit.copy(editable=True) -# full_circuit.append_circuit_inplace(circuit) -# full_circuit.append_circuit_inplace(inversion_circuit) -# else: -# full_circuit = _copy.deepcopy(circuit) -# full_circuit.append_circuit_inplace(inversion_circuit) - -# full_circuit.done_editing() - -# # Find the expected outcome of the circuit. -# s_out, p_out = _symp.symplectic_rep_of_clifford_circuit(full_circuit, pspec=pspec) -# if conditionaltwirl: # s_out is not always the identity with a conditional twirl, -# # only conditional on prep/measure. -# assert(_np.array_equal(s_out[:n, n:], _np.zeros((n, n), _np.int64))), "Compiler has failed!" -# else: assert(_np.array_equal(s_out, _np.identity(2 * n, _np.int64))), "Compiler has failed!" - -# # Find the ideal output of the circuit. -# s_inputstate, p_inputstate = _symp.prep_stabilizer_state(n, zvals=None) -# s_outstate, p_outstate = _symp.apply_clifford_to_stabilizer_state(s_out, p_out, s_inputstate, p_inputstate) -# idealout = [] -# for qubit_labels in structure: -# subset_idealout = [] -# for q in qubit_labels: -# qind = circuit.line_labels.index(q) -# measurement_out = _symp.pauli_z_measurement(s_outstate, p_outstate, qind) -# bit = measurement_out[1] -# assert(bit == 0 or bit == 1), "Ideal output is not a computational basis state!" -# if not randomizeout: -# assert(bit == 0), "Ideal output is not the all 0s computational basis state!" 
-# subset_idealout.append(int(bit)) -# idealout.append(tuple(subset_idealout)) -# idealout = tuple(idealout) - -# if not partitioned: outcircuit = full_circuit -# else: -# if cliffordtwirl: outcircuit = [initial_circuit, circuit, inversion_circuit] -# else: outcircuit = [circuit, inversion_circuit] - -# return outcircuit, idealout - - -# def create_simultaneous_direct_rb_experiment(pspec, depths, circuits_per_length, structure='1Q', -# sampler='Qelimination', -# samplerargs=[], addlocal=False, lsargs=[], randomizeout=False, -# cliffordtwirl=True, conditionaltwirl=True, citerations=20, -# compilerargs=[], -# partitioned=False, set_isolated=True, setcomplement_isolated=False, -# descriptor='A set of simultaneous DRB experiments', verbosity=1, -# seed=1234): -# """ -# Generates a simultaneous "direct randomized benchmarking" (DRB) experiments (circuits). - -# DRB is the protocol introduced in arXiv:1807.07975 (2018). -# An n-qubit DRB circuit consists of (1) a circuit the prepares a uniformly random stabilizer state; -# (2) a length-l circuit (specified by `length`) consisting of circuit layers sampled according to -# some user-specified distribution (specified by `sampler`), (3) a circuit that maps the output of -# the preceeding circuit to a computational basis state. See arXiv:1807.07975 (2018) for further -# details. In simultaneous DRB ...... . - -# Parameters -# ---------- -# pspec : QubitProcessorSpec -# The QubitProcessorSpec for the device that the circuit is being sampled for, which defines the -# "native" gate-set and the connectivity of the device. The returned DRB circuit will be over -# the gates in `pspec`, and will respect the connectivity encoded by `pspec`. Note that `pspec` -# is always handed to the sampler, as the first argument of the sampler function (this is only -# of importance when not using an in-built sampler for the "core" of the DRB circuit). Unless -# `qubit_labels` is not None, the circuit is sampled over all the qubits in `pspec`. - -# depths : int -# The set of "direct RB depths" for the circuits. The DRB depths must be integers >= 0. -# Unless `addlocal` is True, the DRB length is the depth of the "core" random circuit, -# sampled according to `sampler`, specified in step (2) above. If `addlocal` is True, -# each layer in the "core" circuit sampled according to "sampler` is followed by a layer of -# 1-qubit gates, with sampling specified by `lsargs` (and the first layer is proceeded by a -# layer of 1-qubit gates), and so the circuit of step (2) is length 2*`length` + 1. - -# circuits_per_length : int -# The number of (possibly) different DRB circuits sampled at each length. - -# structure : str or tuple. -# Defines the "structure" of the simultaneous DRB experiment. TODO : more details. - -# sampler : str or function, optional -# If a string, this should be one of: {'pairingQs', 'Qelimination', 'co2Qgates', 'local'}. -# Except for 'local', this corresponds to sampling layers according to the sampling function -# in rb.sampler named circuit_layer_by* (with * replaced by 'sampler'). For 'local', this -# corresponds to sampling according to rb.sampler.circuit_layer_of_oneQgates [which is not -# a valid form of sampling for n-qubit DRB, but is not explicitly forbidden in this function]. -# If `sampler` is a function, it should be a function that takes as the first argument a -# QubitProcessorSpec, and returns a random circuit layer as a list of gate Label objects. 
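The removed sampler above returns a (circuit, idealout) pair, where idealout is the error-free outcome described in its Returns section. A minimal sketch of scoring measured counts against such a tuple (plain Python for illustration; the counts-dict format and helper names are assumptions, not pyGSTi API):

def _flatten_outcome(idealout):
    # idealout may be a flat tuple of 0/1 ints or a tuple of per-subset tuples
    for x in idealout:
        if isinstance(x, tuple):
            yield from _flatten_outcome(x)
        else:
            yield x

def success_fraction(counts, idealout):
    """Fraction of shots whose outcome matches the error-free outcome.
    `counts` is assumed to map bitstrings like '0010' to integer counts."""
    target = ''.join(str(b) for b in _flatten_outcome(idealout))
    total = sum(counts.values())
    return counts.get(target, 0) / total if total else 0.0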
Note that -# the default 'Qelimination' is not necessarily the most useful in-built sampler, but it is the -# only sampler that requires no parameters beyond the QubitProcessorSpec *and* works for arbitrary -# connectivity devices. See the docstrings for each of these samplers for more information. - -# samplerargs : list, optional -# A list of arguments that are handed to the sampler function, specified by `sampler`. -# The first argument handed to the sampler is `pspec`, the second argument is `qubit_labels`, -# and `samplerargs` lists the remaining arguments handed to the sampler. This is not -# optional for some choices of `sampler`. - -# addlocal : bool, optional -# Whether to follow each layer in the "core" circuits, sampled according to `sampler` with -# a layer of 1-qubit gates. - -# lsargs : list, optional -# Only used if addlocal is True. A list of optional arguments handed to the 1Q gate -# layer sampler circuit_layer_by_oneQgate(). Specifies how to sample 1Q-gate layers. - -# randomizeout : bool, optional -# If False, the ideal output of the circuits (the "success" or "survival" outcome) is the all-zeros -# bit string. If True, the ideal output of each circuit is randomized to a uniformly random bit-string. -# This setting is useful for, e.g., detecting leakage/loss/measurement-bias etc. - -# cliffordtwirl : bool, optional -# Wether to begin the circuit with a sequence that generates a random stabilizer state. For -# standard DRB this should be set to True. There are a variety of reasons why it is better -# to have this set to True. - -# conditionaltwirl : bool, optional -# DRB only requires that the initial/final sequences of step (1) and (3) create/measure -# a uniformly random / particular stabilizer state, rather than implement a particular unitary. -# step (1) and (3) can be achieved by implementing a uniformly random Clifford gate and the -# unique inversion Clifford, respectively. This is implemented if `conditionaltwirl` is False. -# However, steps (1) and (3) can be implemented much more efficiently than this: the sequences -# of (1) and (3) only need to map a particular input state to a particular output state, -# if `conditionaltwirl` is True this more efficient option is chosen -- this is option corresponds -# to "standard" DRB. (the term "conditional" refers to the fact that in this case we essentially -# implementing a particular Clifford conditional on a known input). - -# citerations : int, optional -# Some of the stabilizer state / Clifford compilation algorithms in pyGSTi (including the default -# algorithms) are randomized, and the lowest-cost circuit is chosen from all the circuits generated -# in the iterations of the algorithm. This is the number of iterations used. The time required to -# generate a DRB circuit is linear in `citerations`. Lower-depth / lower 2-qubit gate count -# compilations of steps (1) and (3) are important in order to successfully implement DRB on as many -# qubits as possible. - -# compilerargs : list, optional -# A list of arguments that are handed to the compile_stabilier_state/measurement()functions (or the -# compile_clifford() function if `conditionaltwirl `is False). This includes all the optional -# arguments of these functions *after* the `iterations` option (set by `citerations`). For most -# purposes the default options will be suitable (or at least near-optimal from the compilation methods -# in-built into pyGSTi). See the docstrings of these functions for more information. 
- -# partitioned : bool, optional -# If False, each circuit is returned as a single full circuit. If True, each circuit is returned as -# a list of three circuits consisting of: (1) the stabilizer-prep circuit, (2) the core random circuit, -# (3) the pre-measurement circuit. In that case the full circuit is obtained by appended (2) to (1) -# and then (3) to (1). - -# set_isolated : bool, optional -# Todo - -# setcomplement_isolated : bool, optional -# Todo - -# descriptor : str, optional -# A description of the experiment being generated. Stored in the output dictionary. - -# verbosity : int, optional -# If > 0 the number of circuits generated so far is shown. - -# seed: int, optional -# Seed for RNG - -# Returns -# ------- -# Circuit or list of Circuits -# If partioned is False, a random DRB circuit sampled as specified. If partioned is True, a list of -# three circuits consisting of (1) the stabilizer-prep circuit, (2) the core random circuit, -# (3) the pre-measurement circuit. In that case the full circuit is obtained by appended (2) to (1) -# and then (3) to (1). -# Tuple -# A length-n tuple of integers in [0,1], corresponding to the error-free outcome of the -# circuit. Always all zeros if `randomizeout` is False. The ith element of the tuple -# corresponds to the error-free outcome for the qubit labelled by: the ith element of -# `qubit_labels`, if `qubit_labels` is not None; the ith element of `pspec.qubit_labels`, otherwise. -# In both cases, the ith element of the tuple corresponds to the error-free outcome for the -# qubit on the ith wire of the output circuit. -# dict -# A dictionary containing the generated RB circuits, the error-free outputs of the circuit, -# and the specification used to generate the circuits. The keys are: - -# - 'circuits'. A dictionary of the sampled circuits. The circuit with key(l,k) is the kth circuit -# at DRB length l. - -# - 'idealout'. A dictionary of the error-free outputs of the circuits as tuples. The tuple with -# key(l,k) is the error-free output of the (l,k) circuit. The ith element of this tuple corresponds -# to the error-free outcome for the qubit on the ith wire of the output circuit and/or the ith element -# of the list at the key 'qubitordering'. These tuples will all be (0,0,0,...) when `randomizeout` is -# False - -# - 'qubitordering'. The ordering of the qubits in the 'idealout' tuples. - -# - 'spec'. A dictionary containing all of the parameters handed to this function, except `pspec`. -# This then specifies how the circuits where generated. 
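Since the Returns section above only sketches the dictionary layout, here is a small illustrative walker over that structure (the removed implementation below actually stores the ideal outcomes under the key 'target' rather than 'idealout', so both are tried; everything else is an assumption for illustration, not pyGSTi API):

def iter_drb_circuits(experiment_dict):
    """Yield (depth, index, circuit, ideal_outcome) from an experiment dict
    whose 'circuits' entries are keyed by (l, k) pairs, as documented above."""
    ideal = experiment_dict.get('idealout') or experiment_dict.get('target', {})
    for (l, k), circuit in sorted(experiment_dict['circuits'].items()):
        yield l, k, circuit, ideal.get((l, k))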
-# """ - -# experiment_dict = {} -# experiment_dict['spec'] = {} -# experiment_dict['spec']['depths'] = depths -# experiment_dict['spec']['circuits_per_length'] = circuits_per_length -# experiment_dict['spec']['sampler'] = sampler -# experiment_dict['spec']['samplerargs'] = samplerargs -# experiment_dict['spec']['addlocal'] = addlocal -# experiment_dict['spec']['lsargs'] = lsargs -# experiment_dict['spec']['randomizeout'] = randomizeout -# experiment_dict['spec']['cliffordtwirl'] = cliffordtwirl -# experiment_dict['spec']['conditionaltwirl'] = conditionaltwirl -# experiment_dict['spec']['citerations'] = citerations -# experiment_dict['spec']['compilerargs'] = compilerargs -# experiment_dict['spec']['partitioned'] = partitioned -# experiment_dict['spec']['descriptor'] = descriptor -# experiment_dict['spec']['createdby'] = 'extras.rb.sample.simultaneous_direct_rb_experiment' - -# #rand_state = _np.random.RandomState(seed) # OK if seed is None - -# if isinstance(structure, str): -# assert(structure == '1Q'), "The only default `structure` option is the string '1Q'" -# structure = tuple([(q,) for q in pspec.qubit_labels]) -# else: -# assert(isinstance(structure, list) or isinstance(structure, tuple)), \ -# "If not a string, `structure` must be a list or tuple." -# qubits_used = [] -# for qubit_labels in structure: -# assert(isinstance(qubit_labels, list) or isinstance( -# qubit_labels, tuple)), "SubsetQs must be a list or a tuple!" -# qubits_used = qubits_used + list(qubit_labels) -# assert(len(set(qubits_used)) == len(qubits_used)), \ -# "The qubits in the tuples/lists of `structure must all be unique!" - -# assert(set(qubits_used).issubset(set(pspec.qubit_labels))), \ -# "The qubits to benchmark must all be in the QubitProcessorSpec `pspec`!" - -# experiment_dict['spec']['structure'] = structure -# experiment_dict['circuits'] = {} -# experiment_dict['target'] = {} -# experiment_dict['settings'] = {} - -# for qubit_labels in structure: -# subgraph = pspec.qubit_graph.subgraph(list(qubit_labels)) # or pspec.compute_clifford_2Q_connectivity? -# assert(subgraph.is_connected_graph()), "Each subset of qubits in `structure` must be connected!" 
- -# for lnum, l in enumerate(depths): -# lseed = seed + lnum * circuits_per_length -# if verbosity > 0: -# print('- Sampling {} circuits at DRB length {} ({} of {} depths) with seed {}'.format(circuits_per_length, -# l, lnum + 1, -# len(depths), lseed)) -# print(' - Number of circuits sampled = ', end='') -# for j in range(circuits_per_length): -# circuit, idealout = sample_simultaneous_direct_rb_circuit(pspec, l, structure=structure, sampler=sampler, -# samplerargs=samplerargs, addlocal=addlocal, -# lsargs=lsargs, randomizeout=randomizeout, -# cliffordtwirl=cliffordtwirl, -# conditionaltwirl=conditionaltwirl, -# citerations=citerations, -# compilerargs=compilerargs, -# partitioned=partitioned, -# seed=lseed + j) - -# if (not set_isolated) and (not setcomplement_isolated): -# experiment_dict['circuits'][l, j] = circuit -# experiment_dict['target'][l, j] = idealout - -# else: -# experiment_dict['circuits'][l, j] = {} -# experiment_dict['target'][l, j] = {} -# experiment_dict['settings'][l, j] = {} -# experiment_dict['circuits'][l, j][tuple(structure)] = circuit -# experiment_dict['target'][l, j][tuple(structure)] = idealout -# experiment_dict['settings'][l, j][tuple(structure)] = _get_setting(l, j, structure, depths, -# circuits_per_length, structure) - -# if set_isolated: -# for subset_ind, subset in enumerate(structure): -# subset_circuit = circuit.copy(editable=True) -# for q in circuit.line_labels: -# if q not in subset: -# subset_circuit.replace_with_idling_line_inplace(q) -# subset_circuit.done_editing() -# experiment_dict['circuits'][l, j][(tuple(subset),)] = subset_circuit -# experiment_dict['target'][l, j][(tuple(subset),)] = (idealout[subset_ind],) -# experiment_dict['settings'][l, j][(tuple(subset),)] = _get_setting(l, j, (tuple(subset),), depths, -# circuits_per_length, structure) - -# if setcomplement_isolated: -# for subset_ind, subset in enumerate(structure): -# subsetcomplement_circuit = circuit.copy(editable=True) -# for q in circuit.line_labels: -# if q in subset: -# subsetcomplement_circuit.replace_with_idling_line_inplace(q) -# subsetcomplement_circuit.done_editing() -# subsetcomplement = list(_copy.copy(structure)) -# subsetcomplement_idealout = list(_copy.copy(idealout)) -# del subsetcomplement[subset_ind] -# del subsetcomplement_idealout[subset_ind] -# subsetcomplement = tuple(subsetcomplement) -# subsetcomplement_idealout = tuple(subsetcomplement_idealout) -# experiment_dict['circuits'][l, j][subsetcomplement] = subsetcomplement_circuit -# experiment_dict['target'][l, j][subsetcomplement] = subsetcomplement_idealout -# experiment_dict['settings'][l, j][subsetcomplement] = _get_setting(l, j, subsetcomplement, depths, -# circuits_per_length, structure) - -# if verbosity > 0: print(j + 1, end=',') -# if verbosity > 0: print('') - -# return experiment_dict def _sample_clifford_circuit(pspec, clifford_compilations, qubit_labels, citerations, compilerargs, exact_compilation_key, srep_cache, rand_state): diff --git a/pygsti/algorithms/rbfit.py b/pygsti/algorithms/rbfit.py index ba54b3706..167157c73 100644 --- a/pygsti/algorithms/rbfit.py +++ b/pygsti/algorithms/rbfit.py @@ -17,132 +17,6 @@ from pygsti.tools import rbtools as _rbt -# Obsolute function to be deleted. -# def std_practice_analysis(RBSdataset, seed=[0.8, 0.95], bootstrap_samples=200, asymptote='std', rtype='EI', -# datatype='auto'): -# """ -# Implements a "standard practice" analysis of RB data. 
Fits the average success probabilities to the exponential -# decay A + Bp^m, using least-squares fitting, with (1) A fixed (as standard, to 1/2^n where n is the number of -# qubits the data is for), and (2) A, B and p all allowed to varying. Confidence intervals are also estimated using -# a standard non-parameteric boostrap. - -# Parameters -# ---------- -# RBSdataset : RBSummaryDataset -# An RBSUmmaryDataset containing the data to analyze - -# seed : list, optional -# Seeds for the fit of B and p (A is seeded to the asymptote defined by `asympote`). - -# bootstrap_samples : int, optional -# The number of samples in the bootstrap. - -# asymptote : str or float, optional -# The A value for the fitting to A + Bp^m with A fixed. If a string must be 'std', in -# in which case A is fixed to 1/2^n. - -# rtype : {'EI','AGI'}, optional -# The RB error rate rescaling convention. 'EI' results in RB error rates that are associated -# with the entanglement infidelity, which is the error probability with stochastic errors (and -# is equal to the diamond distance). 'AGI' results in RB error rates that are associated with -# average gate infidelity. - -# Returns -# ------- -# RBResults -# An object encapsulating the RB results (and data). - -# """ -# assert(datatype == 'raw' or datatype == 'adjusted' or datatype == 'auto'), "Unknown data type!" - -# if datatype == 'auto': -# if RBSdataset.datatype == 'hamming_distance_counts': -# datatype = 'adjusted' -# else: -# datatype = 'raw' - -# lengths = RBSdataset.lengths -# n = RBSdataset.num_qubits - -# if isinstance(asymptote, str): -# assert(asymptote == 'std'), "If `asympotote` is a string it must be 'std'!" -# if datatype == 'raw': -# asymptote = 1 / 2**n -# elif datatype == 'adjusted': -# asymptote = 1 / 4**n - -# if datatype == 'adjusted': -# ASPs = RBSdataset.adjusted_ASPs -# if datatype == 'raw': -# ASPs = RBSdataset.ASPs - -# FF_results, FAF_results = std_least_squares_fit(lengths, ASPs, n, seed=seed, asymptote=asymptote, -# ftype='full+FA', rtype=rtype) - -# parameters = ['A', 'B', 'p', 'r'] -# bootstraps_FF = {} -# bootstraps_FAF = {} - -# if bootstrap_samples > 0: - -# bootstraps_FF = {p: [] for p in parameters} -# bootstraps_FAF = {p: [] for p in parameters} -# failcount_FF = 0 -# failcount_FAF = 0 - -# # Add bootstrapped data, if neccessary. 
-# RBSdataset.add_bootstrapped_datasets(samples=bootstrap_samples) - -# for i in range(bootstrap_samples): - -# if datatype == 'adjusted': -# BS_ASPs = RBSdataset.bootstraps[i].adjusted_ASPs -# if datatype == 'raw': -# BS_ASPs = RBSdataset.bootstraps[i].ASPs - -# BS_FF_results, BS_FAF_results = std_least_squares_fit(lengths, BS_ASPs, n, seed=seed, -# asymptote=asymptote, ftype='full+FA', -# rtype=rtype) - -# if BS_FF_results['success']: -# for p in parameters: -# bootstraps_FF[p].append(BS_FF_results['estimates'][p]) -# else: -# failcount_FF += 1 -# if BS_FAF_results['success']: -# for p in parameters: -# bootstraps_FAF[p].append(BS_FAF_results['estimates'][p]) -# else: -# failcount_FAF += 1 - -# failrate_FF = failcount_FF / bootstrap_samples -# failrate_FAF = failcount_FAF / bootstrap_samples - -# std_FF = {p: _np.std(_np.array(bootstraps_FF[p])) for p in parameters} -# std_FAF = {p: _np.std(_np.array(bootstraps_FAF[p])) for p in parameters} - -# else: -# bootstraps_FF = None -# std_FF = None -# failrate_FF = None -# bootstraps_FAF = None -# std_FAF = None -# failrate_FAF = None - -# fits = {} -# fits['full'] = FitResults('LS', FF_results['seed'], rtype, FF_results['success'], FF_results['estimates'], -# FF_results['variable'], stds=std_FF, bootstraps=bootstraps_FF, -# bootstraps_failrate=failrate_FF) - -# fits['A-fixed'] = FitResults('LS', FAF_results['seed'], rtype, FAF_results['success'], -# FAF_results['estimates'], FAF_results['variable'], stds=std_FAF, -# bootstraps=bootstraps_FAF, bootstraps_failrate=failrate_FAF) - -# results = SimpleRBResults(RBSdataset, rtype, fits) - -# return results - - def std_least_squares_fit(lengths, asps, n, seed=None, asymptote=None, ftype='full', rtype='EI'): """ Implements a "standard" least-squares fit of RB data. @@ -469,111 +343,3 @@ def _from_nice_serialization(cls, state): return cls(state['fit_type'], state['seed'], state['r_type'], state['success'], state['estimates'], state['variable'], state['stds'], state['bootstraps'], state['bootstraps_failrate']) - -# Obsolute RB results class -# class SimpleRBResults(object): -# """ -# An object to contain the results of an RB analysis. - -# """ - -# def __init__(self, data, rtype, fits): -# """ -# Initialize an RBResults object. - -# Parameters -# ---------- -# data : RBSummaryDataset -# The RB summary data that the analysis was performed for. - -# rtype : {'IE','AGI'} -# The type of RB error rate, corresponding to different dimension-dependent -# re-scalings of (1-p), where p is the RB decay constant in A + B*p^m. - -# fits : dict -# A dictionary containing FitResults objects, obtained from one or more -# fits of the data (e.g., a fit with all A, B and p as free parameters and -# a fit with A fixed to 1/2^n). -# """ -# self.data = data -# self.rtype = rtype -# self.fits = fits - -# def plot(self, fitkey=None, decay=True, success_probabilities=True, size=(8, 5), ylim=None, xlim=None, -# legend=True, title=None, figpath=None): -# """ -# Plots RB data and, optionally, a fitted exponential decay. - -# Parameters -# ---------- -# fitkey : dict key, optional -# The key of the self.fits dictionary to plot the fit for. If None, will -# look for a 'full' key (the key for a full fit to A + Bp^m if the standard -# analysis functions are used) and plot this if possible. It otherwise checks -# that there is only one key in the dict and defaults to this. If there are -# multiple keys and none of them are 'full', `fitkey` must be specified when -# `decay` is True. 
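The removed std_practice_analysis above fits the average success probabilities to A + B*p^m, either with A fixed to 1/2^n (the 'std' asymptote) or with A, B and p all free. A standalone sketch of that fit with scipy.optimize.curve_fit (the seeding, bootstrap and r-rescaling details described above are omitted; this is not the pyGSTi fitter):

import numpy as np
from scipy.optimize import curve_fit

def fit_rb_decay(lengths, asps, n_qubits, fix_asymptote=True):
    """Least-squares fit of average success probabilities to A + B * p**m."""
    ms = np.asarray(lengths, dtype=float)
    ys = np.asarray(asps, dtype=float)
    if fix_asymptote:
        A = 1.0 / 2 ** n_qubits  # the 'std' asymptote described above
        (B, p), _ = curve_fit(lambda m, B, p: A + B * p ** m, ms, ys,
                              p0=[1.0 - A, 0.9], bounds=([-1.0, 0.0], [1.0, 1.0]))
    else:
        (A, B, p), _ = curve_fit(lambda m, A, B, p: A + B * p ** m, ms, ys,
                                 p0=[1.0 / 2 ** n_qubits, 0.5, 0.9],
                                 bounds=([0.0, -1.0, 0.0], [1.0, 1.0, 1.0]))
    return {'A': A, 'B': B, 'p': p}  # the RB error rate r is a rescaling of 1 - p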
- -# decay : bool, optional -# Whether to plot a fit, or just the data. - -# success_probabilities : bool, optional -# Whether to plot the success probabilities distribution, as a violin plot. (as well -# as the *average* success probabilities at each length). - -# size : tuple, optional -# The figure size - -# ylim, xlim : tuple, optional -# The x and y limits for the figure. - -# legend : bool, optional -# Whether to show a legend. - -# title : str, optional -# A title to put on the figure. - -# figpath : str, optional -# If specified, the figure is saved with this filename. -# """ - -# # Future : change to a plotly plot. -# try: import matplotlib.pyplot as _plt -# except ImportError: raise ValueError("This function requires you to install matplotlib!") - -# if decay and fitkey is None: -# allfitkeys = list(self.fits.keys()) -# if 'full' in allfitkeys: fitkey = 'full' -# else: -# assert(len(allfitkeys) == 1), \ -# "There are multiple fits and none have the key 'full'. Please specify the fit to plot!" -# fitkey = allfitkeys[0] - -# _plt.figure(figsize=size) -# _plt.plot(self.data.lengths, self.data.ASPs, 'o', label='Average success probabilities') - -# if decay: -# lengths = _np.linspace(0, max(self.data.lengths), 200) -# A = self.fits[fitkey].estimates['A'] -# B = self.fits[fitkey].estimates['B'] -# p = self.fits[fitkey].estimates['p'] -# _plt.plot(lengths, A + B * p**lengths, -# label='Fit, r = {:.2} +/- {:.1}'.format(self.fits[fitkey].estimates['r'], -# self.fits[fitkey].stds['r'])) - -# if success_probabilities: -# _plt.violinplot(list(self.data.success_probabilities), self.data.lengths, points=10, widths=1., -# showmeans=False, showextrema=False, showmedians=False) # , label='Success probabilities') - -# if title is not None: _plt.title(title) -# _plt.ylabel("Success probability") -# _plt.xlabel("RB sequence length $(m)$") -# _plt.ylim(ylim) -# _plt.xlim(xlim) - -# if legend: _plt.legend() - -# if figpath is not None: _plt.savefig(figpath, dpi=1000) -# else: _plt.show() - -# return diff --git a/pygsti/baseobjs/errorgenbasis.py b/pygsti/baseobjs/errorgenbasis.py index 97975ca1a..d3090405b 100644 --- a/pygsti/baseobjs/errorgenbasis.py +++ b/pygsti/baseobjs/errorgenbasis.py @@ -514,125 +514,3 @@ def intersection(self, other_basis): def difference(self, other_basis): return self.to_explicit_basis().difference(other_basis) - - -#OLD - maybe not needed? -#class LowWeightElementaryErrorgenBasis(ElementaryErrorgenBasis): -# """ -# Spanned by the elementary error generators of given type(s) (e.g. "Hamiltonian" and/or "other") -# and with elements corresponding to a `Basis`, usually of Paulis. -# """ -# -# def __init__(self, basis_1q, state_space, other_mode, max_ham_weight=None, max_other_weight=None, -# must_overlap_with_these_sslbls=None): -# self._basis_1q = basis_1q -# self._other_mode = other_mode -# self.state_space = state_space -# self._max_ham_weight = max_ham_weight -# self._max_other_weight = max_other_weight -# self._must_overlap_with_these_sslbls = must_overlap_with_these_sslbls -# -# assert(self.state_space.is_entirely_qubits), "FOGI only works for models containing just qubits (so far)" -# sslbls = self.state_space.sole_tensor_product_block_labels # all the model's state space labels -# self.sslbls = sslbls # the "support" of this space - the qubit labels -# -# self._cached_label_indices = None -# self._cached_labels_by_support = None -# self._cached_elements = None -# -# #Needed? 
-# # self.dim = len(self.labels) # TODO - update this so we don't always need to build labels -# # # (this defeats lazy building via property below) - we can just compute this, especially if -# # # not too fancy -# -# @property -# def labels(self): -# if self._cached_label_indices is None: -# -# def _basis_el_strs(possible_bels, wt): -# for els in _itertools.product(*([possible_bels] * wt)): -# yield ''.join(els) -# -# labels = {} -# all_bels = self.basis_1q.labels[1:] # assume first element is identity -# nontrivial_bels = self.basis_1q.labels[1:] # assume first element is identity -# -# max_weight = self._max_ham_weight if (self._max_ham_weight is not None) else len(self.sslbls) -# for weight in range(1, max_weight + 1): -# for support in _itertools.combinations(self.sslbls, weight): -# if (self._must_overlap_with_these_sslbls is not None -# and len(self._must_overlap_with_these_sslbls.intersection(support)) == 0): -# continue -# if support not in labels: labels[support] = [] # always True? -# labels[support].extend([('H', bel) for bel in _basis_el_strs(nontrivial_bels, weight)]) -# -# max_weight = self._max_other_weight if (self._max_other_weight is not None) else len(self.sslbls) -# if self._other_mode != "all": -# for weight in range(1, max_weight + 1): -# for support in _itertools.combinations(self.sslbls, weight): -# if (self._must_overlap_with_these_sslbls is not None -# and len(self._must_overlap_with_these_sslbls.intersection(support)) == 0): -# continue -# if support not in labels: labels[support] = [] -# labels[support].extend([('S', bel) for bel in _basis_el_strs(nontrivial_bels, weight)]) -# else: -# #This is messy code that relies on basis labels being single characters -- TODO improve(?) -# idle_char = self.basis_1q.labels[1:] # assume first element is identity -# assert(len(idle_char) == 1 and all([len(c) == 1 for c in nontrivial_bels])), \ -# "All basis el labels must be single chars for other_mode=='all'!" 
-# for support in _itertools.combinations(self.sslbls, max_weight): -# # Loop over all possible basis elements for this max-weight support, computing the actual support -# # of each one individually and appending it to the appropriate list -# for bel1 in _basis_el_strs(all_bels, max_weight): -# nonidle_indices1 = [i for i in range(max_weight) if bel1[i] != idle_char] -# for bel2 in _basis_el_strs(all_bels, max_weight): -# nonidle_indices2 = [i for i in range(max_weight) if bel2[i] != idle_char] -# nonidle_indices = list(sorted(set(nonidle_indices1) + set(nonidle_indices2))) -# bel1 = ''.join([bel1[i] for i in nonidle_indices]) # trim to actual support -# bel2 = ''.join([bel2[i] for i in nonidle_indices]) # trim to actual support -# actual_support = tuple([support[i] for i in nonidle_indices]) -# -# if (self._must_overlap_with_these_sslbls is not None -# and len(self._must_overlap_with_these_sslbls.intersection(actual_support)) == 0): -# continue -# -# if actual_support not in labels: labels[actual_support] = [] -# labels[actual_support].append(('S', bel1, bel2)) -# -# self._cached_labels_by_support = labels -# self._cached_label_indices = _collections.OrderedDict(((support_lbl, i) for i, support_lbl in enumerate( -# ((support, lbl) for support, lst in labels.items() for lbl in lst)))) -# -# return tuple(self._cached_label_indices.keys()) -# -# @property -# def element_supports_and_matrices(self): -# if self._cached_elements is None: -# self._cached_elements = tuple( -# ((support, _ot.lindblad_error_generator(elemgen_label, self.basis_1q, normalize=True, sparse=False)) -# for support, elemgen_label in self.labels)) -# return self._cached_elements -# -# def element_index(self, label): -# """ -# TODO: docstring -# """ -# if self._cached_label_indices is None: -# self.labels # triggers building of labels -# return self._cached_label_indices[label] -# -# @property -# def sslbls(self): -# """ The support of this errorgen space, e.g., the qubits where its elements may be nontrivial """ -# return self.sslbls -# -# def create_subbasis(self, must_overlap_with_these_sslbls, retain_max_weights=True): -# """ -# Create a sub-basis of this basis by including only the elements -# that overlap the given support (state space labels) -# """ -# #Note: can we reduce self.state_space? -# return CompleteErrorgenBasis(self._basis_1q, self.state_space, self._other_mode, -# self._max_ham_weight if retain_max_weights else None, -# self._max_other_weight if retain_max_weights else None, -# self._must_overlap_with_these_sslbls) diff --git a/pygsti/baseobjs/errorgenspace.py b/pygsti/baseobjs/errorgenspace.py index d1df666a4..ad71ad4c7 100644 --- a/pygsti/baseobjs/errorgenspace.py +++ b/pygsti/baseobjs/errorgenspace.py @@ -97,13 +97,3 @@ def normalize(self, norm_order=2): for j in range(self.vectors.shape[1]): sign = +1 if max(self.vectors[:, j]) >= -min(self.vectors[:, j]) else -1 self.vectors[:, j] /= sign * _np.linalg.norm(self.vectors[:, j], ord=norm_order) - - -#class LowWeightErrorgenSpace(ErrorgenSpace): -# """ -# Like a SimpleErrorgenSpace but spanned by only the elementary error generators corresponding to -# low-weight (up to some maximum weight) basis elements -# (so far, only Pauli-product bases work for this, since `Basis` objects don't keep track of each -# element's weight (?)). 
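The labels property of the removed class above boils down to enumerating non-identity basis-element strings on every qubit subset up to a maximum weight. A tiny standalone sketch of that enumeration with itertools (function and argument names are illustrative, not pyGSTi API):

import itertools

def weight_limited_labels(sslbls, max_weight, nontrivial_bels=('X', 'Y', 'Z'), typ='H'):
    """Yield (support, (typ, basis_element_string)) labels, mirroring the
    weight / support / basis-element loops of the removed labels property."""
    for weight in range(1, max_weight + 1):
        for support in itertools.combinations(sslbls, weight):
            for bels in itertools.product(nontrivial_bels, repeat=weight):
                yield support, (typ, ''.join(bels))

# e.g. len(list(weight_limited_labels(('Q0', 'Q1'), 2))) == 3 + 3 + 9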
-# """ -# pass diff --git a/pygsti/baseobjs/polynomial.py b/pygsti/baseobjs/polynomial.py index 0045cabe5..4848c29b1 100644 --- a/pygsti/baseobjs/polynomial.py +++ b/pygsti/baseobjs/polynomial.py @@ -543,574 +543,6 @@ def to_rep(self): # , max_num_vars=None not needed anymore -- given at __init__ Polynomial = FASTPolynomial -# class SLOWPolynomial(dict): # REMOVE THIS CLASS (just for reference) -# """ -# Encapsulates a polynomial as a subclass of the standard Python dict. -# -# Variables are represented by integer indices, e.g. "2" means "x_2". -# Keys are tuples of variable indices and values are numerical -# coefficients (floating point or complex numbers). To specify a variable -# to some power, its index is repeated in the key-tuple. -# -# E.g. x_0^2 + 3*x_1 + 4 is stored as {(0,0): 1.0, (1,): 3.0, (): 4.0} -# -# Parameters -# ---------- -# coeffs : dict -# A dictionary of coefficients. Keys are tuples of integers that -# specify the polynomial term the coefficient value multiplies -# (see above). If None, the zero polynomial (no terms) is created. -# -# max_num_vars : int -# The maximum number of independent variables this polynomial can -# hold. Placing a limit on the number of variables allows more -# compact storage and efficient evaluation of the polynomial. -# """ -# -# @classmethod -# def _vindices_per_int(cls, max_num_vars): -# """ -# The number of variable indices that fit into a single int when there are at most `max_num_vars` variables. -# -# This quantity is needed to directly construct Polynomial representations -# and is thus useful internally for forward simulators. -# -# Parameters -# ---------- -# max_num_vars : int -# The maximum number of independent variables. -# -# Returns -# ------- -# int -# """ -# return int(_np.floor(PLATFORM_BITS / _np.log2(max_num_vars + 1))) -# -# @classmethod -# def from_rep(cls, rep): -# """ -# Creates a Polynomial from a "representation" (essentially a lite-version) of a Polynomial. -# -# Note: usually we only need to convert from full-featured Python objects -# to the lighter-weight "representation" objects. Polynomials are an -# exception, since as the results of probability computations they need -# to be converted back from "representation-form" to "full-form". -# -# Parameters -# ---------- -# rep : PolynomialRep -# A polynomial representation. -# -# Returns -# ------- -# Polynomial -# """ -# max_num_vars = rep.max_num_vars # one of the few/only cases where a rep -# # max_order = rep.max_order # needs to expose some python properties -# -# def int_to_vinds(indx_tup): -# ret = [] -# for indx in indx_tup: -# while indx != 0: -# nxt = indx // (max_num_vars + 1) -# i = indx - nxt * (max_num_vars + 1) -# ret.append(i - 1) -# indx = nxt -# #assert(len(ret) <= max_order) #TODO: is this needed anymore? -# return tuple(sorted(ret)) -# -# tup_coeff_dict = {int_to_vinds(k): val for k, val in rep.coeffs.items()} -# ret = cls(tup_coeff_dict) -# ret.fastpoly = FASTPolynomial.from_rep(rep) -# ret._check_fast_polynomial() -# return ret -# -# def __init__(self, coeffs=None, max_num_vars=100): -# """ -# Initializes a new Polynomial object (a subclass of dict). -# -# Internally (as a dict) a Polynomial represents variables by integer -# indices, e.g. "2" means "x_2". Keys are tuples of variable indices and -# values are numerical coefficients (floating point or complex numbers). -# A variable to a power > 1 has its index repeated in the key-tuple. -# -# E.g. 
x_0^2 + 3*x_1 + 4 is stored as `{(0,0): 1.0, (1,): 3.0, (): 4.0}` -# -# Parameters -# ---------- -# coeffs : dict -# A dictionary of coefficients. Keys are tuples of integers that -# specify the polynomial term the coefficient value multiplies -# (see above). If None, the zero polynomial (no terms) is created. -# -# max_num_vars : int -# The maximum number of independent variables this polynomial can -# hold. Placing a limit on the number of variables allows more -# compact storage and efficient evaluation of the polynomial. -# """ -# super(Polynomial, self).__init__() -# if coeffs is not None: -# self.update(coeffs) -# self.max_num_vars = max_num_vars -# self.fastpoly = FASTPolynomial(coeffs, max_num_vars) -# self._check_fast_polynomial() -# -# def _check_fast_polynomial(self, raise_err=True): -# """ -# Check that included FASTPolynomial has remained in-sync with this one. -# -# This is purely for debugging, to ensure that the FASTPolynomial -# class implements its operations correctly. -# -# Parameters -# ---------- -# raise_err : bool, optional -# Whether to raise an AssertionError if the check fails. -# -# Returns -# ------- -# bool -# Whether or not the check has succeeded (True if the -# fast and slow implementations are in sync). -# """ -# if set(self.fastpoly.coeffs.keys()) != set(self.keys()): -# print("FAST", self.fastpoly.coeffs, " != SLOW", dict(self)) -# if raise_err: assert(False), "STOP" -# return False -# for k in self.fastpoly.coeffs.keys(): -# if not _np.isclose(self.fastpoly.coeffs[k], self[k]): -# print("FAST", self.fastpoly.coeffs, " != SLOW", dict(self)) -# if raise_err: assert(False), "STOP" -# return False -# if self.max_num_vars != self.fastpoly.max_num_vars: -# print("#Var mismatch: FAST", self.fastpoly.max_num_vars, " != SLOW", self.max_num_vars) -# if raise_err: assert(False), "STOP" -# return False -# -# return True -# -# def deriv(self, wrt_param): -# """ -# Take the derivative of this Polynomial with respect to a single variable. -# -# The result is another Polynomial. -# -# E.g. deriv(x_2^3 + 3*x_1, wrt_param=2) = 3x^2 -# -# Parameters -# ---------- -# wrt_param : int -# The variable index to differentiate with respect to. -# E.g. "4" means "differentiate w.r.t. x_4". -# -# Returns -# ------- -# Polynomial -# """ -# dcoeffs = {} -# for ivar, coeff in self.items(): -# cnt = float(ivar.count(wrt_param)) -# if cnt > 0: -# l = list(ivar) -# del l[l.index(wrt_param)] -# dcoeffs[tuple(l)] = cnt * coeff -# -# ret = Polynomial(dcoeffs, self.max_num_vars) -# ret.fastpoly = self.fastpoly.deriv(wrt_param) -# ret._check_fast_polynomial() -# return ret -# -# def degree(self): -# """ -# The largest sum-of-exponents for any term (monomial) within this polynomial. -# -# E.g. for x_2^3 + x_1^2*x_0^2 has degree 4. -# -# Returns -# ------- -# int -# """ -# ret = max((len(k) for k in self), default=0) -# assert(self.fastpoly.degree == ret) -# self._check_fast_polynomial() -# return ret -# -# def evaluate(self, variable_values): -# """ -# Evaluate this polynomial for a given set of variable values. -# -# Parameters -# ---------- -# variable_values : array-like -# An object that can be indexed so that `variable_values[i]` gives the -# numerical value for i-th variable (x_i). -# -# Returns -# ------- -# float or complex -# Depending on the types of the coefficients and `variable_values`. 
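The dict encoding described above (keys are tuples of variable indices, with repeated indices for powers) is easy to exercise outside the class. A minimal sketch of evaluation and multiplication under that convention (standalone helpers, not the pyGSTi Polynomial API):

import numpy as np

def poly_evaluate(coeffs, variable_values):
    """Evaluate a {vindex-tuple: coefficient} polynomial, e.g.
    {(0, 0): 1.0, (1,): 3.0, (): 4.0} represents x_0**2 + 3*x_1 + 4."""
    return sum(c * np.prod([variable_values[i] for i in k])
               for k, c in coeffs.items())

def poly_mult(p, q):
    """Multiply two polynomials in the same encoding (keys merge and re-sort)."""
    out = {}
    for k1, v1 in p.items():
        for k2, v2 in q.items():
            k = tuple(sorted(k1 + k2))
            out[k] = out.get(k, 0.0) + v1 * v2
    return out

# poly_evaluate({(0, 0): 1.0, (1,): 3.0, (): 4.0}, [2.0, 5.0]) -> 23.0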
-# """ -# #FUTURE: make this function smarter (Russian peasant) -# ret = 0 -# for ivar, coeff in self.items(): -# ret += coeff * _np.prod([variable_values[i] for i in ivar]) -# assert(_np.isclose(ret, self.fastpoly.evaluate(variable_values))) -# self._check_fast_polynomial() -# return ret -# -# def compact(self, complex_coeff_tape=True): -# """ -# Generate a compact form of this polynomial designed for fast evaluation. -# -# The resulting "tapes" can be evaluated using -# :func:`opcalc.bulk_eval_compact_polynomials`. -# -# Parameters -# ---------- -# complex_coeff_tape : bool, optional -# Whether the `ctape` returned array is forced to be of complex type. -# If False, the real part of all coefficients is taken (even if they're -# complex). -# -# Returns -# ------- -# vtape, ctape : numpy.ndarray -# These two 1D arrays specify an efficient means for evaluating this -# polynomial. -# """ -# #if force_complex: -# # iscomplex = True -# #else: -# # iscomplex = any([abs(_np.imag(x)) > 1e-12 for x in self.values()]) -# iscomplex = complex_coeff_tape -# -# nTerms = len(self) -# nVarIndices = sum(map(len, self.keys())) -# vtape = _np.empty(1 + nTerms + nVarIndices, _np.int64) # "variable" tape -# ctape = _np.empty(nTerms, complex if iscomplex else 'd') # "coefficient tape" -# -# i = 0 -# vtape[i] = nTerms; i += 1 -# for iTerm, k in enumerate(sorted(self.keys())): -# l = len(k) -# ctape[iTerm] = self[k] if iscomplex else _np.real(self[k]) -# vtape[i] = l; i += 1 -# vtape[i:i + l] = k; i += l -# assert(i == len(vtape)), "Logic Error!" -# fast_vtape, fast_ctape = self.fastpoly.compact(iscomplex) -# assert(_np.allclose(fast_vtape, vtape) and _np.allclose(fast_ctape, ctape)) -# self._check_fast_polynomial() -# return vtape, ctape -# -# def copy(self): -# """ -# Returns a copy of this polynomial. -# -# Returns -# ------- -# Polynomial -# """ -# fast_cpy = self.fastpoly.copy() -# ret = Polynomial(self, self.max_num_vars) -# ret.fastpoly = fast_cpy -# ret._check_fast_polynomial() -# return ret -# -# def map_indices(self, mapfn): -# """ -# Performs a bulk find & replace on this polynomial's variable indices. -# -# This is useful when the variable indices have external significance -# (like being the indices of a gate's parameters) and one want to convert -# to another set of indices (like a parent model's parameters). -# -# Parameters -# ---------- -# mapfn : function -# A function that takes as input an "old" variable-index-tuple -# (a key of this Polynomial) and returns the updated "new" -# variable-index-tuple. -# -# Returns -# ------- -# Polynomial -# """ -# ret = Polynomial({mapfn(k): v for k, v in self.items()}, self.max_num_vars) -# ret.fastpoly = self.fastpoly.map_indices(mapfn) -# self._check_fast_polynomial() -# ret._check_fast_polynomial() -# return ret -# -# def map_indices_inplace(self, mapfn): -# """ -# Performs an in-place find & replace on this polynomial's variable indices. -# -# This is useful when the variable indices have external significance -# (like being the indices of a gate's parameters) and one want to convert -# to another set of indices (like a parent model's parameters). -# -# Parameters -# ---------- -# mapfn : function -# A function that takes as input an "old" variable-index-tuple -# (a key of this Polynomial) and returns the updated "new" -# variable-index-tuple. 
-# -# Returns -# ------- -# None -# """ -# self._check_fast_polynomial() -# new_items = {mapfn(k): v for k, v in self.items()} -# self.clear() -# self.update(new_items) -# self.fastpoly.map_indices_inplace(mapfn) -# self._check_fast_polynomial() -# -# def mult(self, x): -# """ -# Multiplies this polynomial by another polynomial `x`. -# -# Parameters -# ---------- -# x : Polynomial -# The polynomial to multiply by. -# -# Returns -# ------- -# Polynomial -# The polynomial representing self * x. -# """ -# newpoly = Polynomial({}, self.max_num_vars) -# for k1, v1 in self.items(): -# for k2, v2 in x.items(): -# k = tuple(sorted(k1 + k2)) -# if k in newpoly: newpoly[k] += v1 * v2 -# else: newpoly[k] = v1 * v2 -# -# newpoly.fastpoly = self.fastpoly.mult(x.fastpoly) -# self._check_fast_polynomial() -# newpoly._check_fast_polynomial() -# return newpoly -# -# def scale(self, x): -# """ -# Scale this polynomial by `x` (multiply all coefficients by `x`). -# -# Parameters -# ---------- -# x : float or complex -# The value to scale by. -# -# Returns -# ------- -# None -# """ -# # assume a scalar that can multiply values -# for k in tuple(self.keys()): # I think the tuple() might speed things up (why?) -# self[k] *= x -# self.fastpoly.scale(x) -# self._check_fast_polynomial() -# -# def scalar_mult(self, x): -# """ -# Multiplies this polynomial by a scalar `x`. -# -# Parameters -# ---------- -# x : float or complex -# The value to multiply by. -# -# Returns -# ------- -# Polynomial -# """ -# newpoly = self.copy() -# newpoly.scale(x) -# self._check_fast_polynomial() -# newpoly._check_fast_polynomial() -# return newpoly -# -# def __str__(self): -# def fmt(x): -# if abs(_np.imag(x)) > 1e-6: -# if abs(_np.real(x)) > 1e-6: return "(%.3f+%.3fj)" % (x.real, x.imag) -# else: return "(%.3fj)" % x.imag -# else: return "%.3f" % x.real -# -# termstrs = [] -# sorted_keys = sorted(list(self.keys())) -# for k in sorted_keys: -# varstr = ""; last_i = None; n = 1 -# for i in sorted(k): -# if i == last_i: n += 1 -# elif last_i is not None: -# varstr += "x%d%s" % (last_i, ("^%d" % n) if n > 1 else "") -# n = 1 -# last_i = i -# if last_i is not None: -# varstr += "x%d%s" % (last_i, ("^%d" % n) if n > 1 else "") -# #print("DB: k = ",k, " varstr = ",varstr) -# if abs(self[k]) > 1e-4: -# termstrs.append("%s%s" % (fmt(self[k]), varstr)) -# -# self._check_fast_polynomial() -# if len(termstrs) > 0: -# return " + ".join(termstrs) -# else: return "0" -# -# def __repr__(self): -# return "Poly[ " + str(self) + " ]" -# -# def __add__(self, x): -# newpoly = self.copy() -# if isinstance(x, Polynomial): -# for k, v in x.items(): -# if k in newpoly: newpoly[k] += v -# else: newpoly[k] = v -# newpoly.fastpoly = self.fastpoly + x.fastpoly -# else: # assume a scalar that can be added to values -# for k in newpoly: -# newpoly[k] += x -# newpoly.fastpoly = self.fastpoly + x -# self._check_fast_polynomial() -# newpoly._check_fast_polynomial() -# return newpoly -# -# def __iadd__(self, x): -# """ Does self += x more efficiently """ -# if isinstance(x, Polynomial): -# for k, v in x.items(): -# try: -# self[k] += v -# except KeyError: -# self[k] = v -# self.fastpoly += x.fastpoly -# else: # assume a scalar that can be added to values -# for k in self: -# self[k] += x -# self.fastpoly += x -# self._check_fast_polynomial() -# return self -# -# def __mul__(self, x): -# #if isinstance(x, Polynomial): -# # newpoly = Polynomial() -# # for k1,v1 in self.items(): -# # for k2,v2 in x.items(): -# # k = tuple(sorted(k1+k2)) -# # if k in newpoly: newpoly[k] += 
v1*v2 -# # else: newpoly[k] = v1*v2 -# #else: -# # # assume a scalar that can multiply values -# # newpoly = self.copy() -# # for k in newpoly: -# # newpoly[k] *= x -# #return newpoly -# if isinstance(x, Polynomial): -# ret = self.mult(x) -# else: # assume a scalar that can multiply values -# ret = self.scalar_mult(x) -# self._check_fast_polynomial() -# ret._check_fast_polynomial() -# return ret -# -# def __rmul__(self, x): -# return self.__mul__(x) -# -# def __imul__(self, x): -# self._check_fast_polynomial() -# if isinstance(x, Polynomial): -# x._check_fast_polynomial() -# newcoeffs = {} -# for k1, v1 in self.items(): -# for k2, v2 in x.items(): -# k = tuple(sorted(k1 + k2)) -# if k in newcoeffs: newcoeffs[k] += v1 * v2 -# else: newcoeffs[k] = v1 * v2 -# self.clear() -# self.update(newcoeffs) -# self.fastpoly *= x.fastpoly -# self._check_fast_polynomial() -# else: -# self.scale(x) -# self._check_fast_polynomial() -# return self -# -# def __pow__(self, n): -# ret = Polynomial({(): 1.0}, self.max_num_vars) # max_order updated by mults below -# cur = self -# for i in range(int(_np.floor(_np.log2(n))) + 1): -# rem = n % 2 # gets least significant bit (i-th) of n -# if rem == 1: ret *= cur # add current power of x (2^i) if needed -# cur = cur * cur # current power *= 2 -# n //= 2 # shift bits of n right -# ret.fastpoly = self.fastpoly ** n -# ret._check_fast_polynomial() -# self._check_fast_polynomial() -# return ret -# -# def __copy__(self): -# ret = self.copy() -# ret._check_fast_polynomial() -# self._check_fast_polynomial() -# return ret -# -# def to_rep(self): -# """ -# Construct a representation of this polynomial. -# -# "Representations" are lightweight versions of objects used to improve -# the efficiency of intensely computational tasks. Note that Polynomial -# representations must have the same `max_order` and `max_num_vars` in -# order to interact with each other (add, multiply, etc.). -# -# Parameters -# ---------- -# max_num_vars : int, optional -# The maximum number of variables the represenatation is allowed to -# have (x_0 to x_(`max_num_vars-1`)). This sets the maximum allowed -# variable index within the representation. -# -# Returns -# ------- -# PolynomialRep -# """ -# # Set max_num_vars (determines based on coeffs if necessary) -# max_num_vars = self.max_num_vars -# default_max_vars = 0 if len(self) == 0 else \ -# max([(max(k) + 1 if k else 0) for k in self.keys()]) -# if max_num_vars is None: -# max_num_vars = default_max_vars -# else: -# assert(default_max_vars <= max_num_vars) -# -# vindices_per_int = Polynomial._vindices_per_int(max_num_vars) -# -# def vinds_to_int(vinds): -# """ Convert tuple index of ints to single int given max_numvars """ -# ints_in_key = int(_np.ceil(len(vinds) / vindices_per_int)) -# ret_tup = [] -# for k in range(ints_in_key): -# ret = 0; m = 1 -# for i in vinds[k * vindices_per_int:(k + 1) * vindices_per_int]: # last tuple index=most significant -# assert(i < max_num_vars), "Variable index exceed maximum!" -# ret += (i + 1) * m -# m *= max_num_vars + 1 -# assert(ret >= 0), "vinds = %s -> %d!!" 
% (str(vinds), ret) -# ret_tup.append(ret) -# return tuple(ret_tup) -# -# int_coeffs = {vinds_to_int(k): v for k, v in self.items()} -# -# # (max_num_vars+1) ** vindices_per_int <= 2**PLATFORM_BITS, so: -# # vindices_per_int * log2(max_num_vars+1) <= PLATFORM_BITS -# vindices_per_int = int(_np.floor(PLATFORM_BITS / _np.log2(max_num_vars + 1))) -# self._check_fast_polynomial() -# -# return _PolynomialRep(int_coeffs, max_num_vars, vindices_per_int) - - def bulk_load_compact_polynomials(vtape, ctape, keep_compact=False, max_num_vars=100): """ Create a list of Polynomial objects from a "tape" of their compact versions. diff --git a/pygsti/data/datacomparator.py b/pygsti/data/datacomparator.py index 5b8f38b99..f5e9e266e 100644 --- a/pygsti/data/datacomparator.py +++ b/pygsti/data/datacomparator.py @@ -49,11 +49,6 @@ def _loglikelihood(p_list, n_list): output += _xlogy(n_list[i], pVal) return output -# Only used by the rectify data function, which is commented out, -# so this is also commented out. -# def loglikelihoodRatioObj(alpha,n_list_list,dof): -# return _np.abs(dof - _loglikelihood_ratio(alpha*n_list_list)) - def _loglikelihood_ratio(n_list_list): """ diff --git a/pygsti/data/hypothesistest.py b/pygsti/data/hypothesistest.py index 87046853a..985b2e9ac 100644 --- a/pygsti/data/hypothesistest.py +++ b/pygsti/data/hypothesistest.py @@ -253,13 +253,6 @@ def _initialize_to_weighted_holms_test(self): self.passing_graph[hind, :] = _np.ones(len(self.hypotheses), float) / (len(self.hypotheses) - 1) self.passing_graph[hind, hind] = 0. - # def _check_permissible(self): - # """ - # Todo - # """ - # # Todo : test that the graph is acceptable. - # return True - def add_pvalues(self, pvalues): """ Insert the p-values for the hypotheses. diff --git a/pygsti/evotypes/chp/statereps.py b/pygsti/evotypes/chp/statereps.py index 749f0ab06..7ed792363 100644 --- a/pygsti/evotypes/chp/statereps.py +++ b/pygsti/evotypes/chp/statereps.py @@ -77,25 +77,3 @@ def reps_have_changed(self): def actionable_staterep(self): state_rep = self.state_rep.actionable_staterep() return self.op_rep.acton(state_rep) - -#REMOVE -# def chp_ops(self, seed_or_state=None): -# return self.state_rep.chp_ops(seed_or_state=seed_or_state) \ -# + self.op_rep.chp_ops(seed_or_state=seed_or_state) - -# TODO: Untested, only support computational and composed for now -#class StateRepTensorProduct(StateRep): -# def __init__(self, factor_state_reps, state_space): -# self.factor_reps = factor_state_reps -# super(StateRepTensorProduct, self).__init__([], state_space) -# self.reps_have_changed() -# -# def reps_have_changed(self): -# chp_ops = [] -# current_iqubit = 0 -# for factor in self.factor_reps: -# local_to_tp_index = {str(iloc): str(itp) for iloc, itp in -# enumerate(range(current_iqubit, current_iqubit + factor.num_qubits))} -# chp_ops.extend([_update_chp_op(op, local_to_tp_index) for op in self.chp_ops]) -# current_iqubit += factor.num_qubits -# self.chp_ops = chp_ops diff --git a/pygsti/evotypes/statevec_slow/opreps.py b/pygsti/evotypes/statevec_slow/opreps.py index b60fadd51..817ad1e55 100644 --- a/pygsti/evotypes/statevec_slow/opreps.py +++ b/pygsti/evotypes/statevec_slow/opreps.py @@ -107,11 +107,6 @@ def __init__(self, name, basis, state_space): super(OpRepStandard, self).__init__(U, basis, state_space) -#class OpRepStochastic(OpRepDense): -# - maybe we could add this, but it wouldn't be a "dense" op here, -# perhaps we need to change API? 
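For reference, the removed to_rep/from_rep pair above packs each sorted variable-index tuple into machine integers, storing index i as the digit (i + 1) in base (max_num_vars + 1). A small round-trip sketch of that packing (standalone, 64-bit ints assumed; not the pyGSTi representation classes):

import math

def encode_vinds(vinds, max_num_vars, platform_bits=64):
    """Pack a sorted tuple of variable indices into one or more integers."""
    per_int = int(math.floor(platform_bits / math.log2(max_num_vars + 1)))
    packed = []
    for start in range(0, len(vinds), per_int):
        val, m = 0, 1
        for i in vinds[start:start + per_int]:
            val += (i + 1) * m
            m *= max_num_vars + 1
        packed.append(val)
    return tuple(packed)

def decode_vinds(packed, max_num_vars):
    """Inverse of encode_vinds: recover the sorted variable-index tuple."""
    ret = []
    for val in packed:
        while val:
            val, digit = divmod(val, max_num_vars + 1)
            ret.append(digit - 1)
    return tuple(sorted(ret))

# decode_vinds(encode_vinds((0, 0, 3), 100), 100) -> (0, 0, 3)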
- - class OpRepComposed(OpRep): # exactly the same as densitymx case def __init__(self, factor_op_reps, state_space): diff --git a/pygsti/extras/rb/dataset.py b/pygsti/extras/rb/dataset.py index 249c665df..3828970df 100644 --- a/pygsti/extras/rb/dataset.py +++ b/pygsti/extras/rb/dataset.py @@ -345,34 +345,3 @@ def add_bootstrapped_datasets(self, samples=1000): descriptor='data created from a non-parametric bootstrap') self.bootstraps.append(bootstrapped_dataset) - - # todo : add this back in. - # def create_smaller_dataset(self, numberofcircuits): - # """ - # Creates a new dataset that has discarded the data from all but the first `numberofcircuits` - # circuits at each length. - - # Parameters - # ---------- - # numberofcircuits : int - # The maximum number of circuits to keep at each length. - - # Returns - # ------- - # RBSummaryDataset - # A new dataset containing less data. - # """ - # newRBSdataset = _copy.deepcopy(self) - # for i in range(len(newRBSdataset.lengths)): - # if newRBSdataset.success_counts is not None: - # newRBSdataset.success_counts[i] = newRBSdataset.success_counts[i][:numberofcircuits] - # if newRBSdataset.success_probabilities is not None: - # newRBSdataset.success_probabilities[i] = newRBSdataset.success_probabilities[i][:numberofcircuits] - # if newRBSdataset.total_counts is not None: - # newRBSdataset.total_counts[i] = newRBSdataset.total_counts[i][:numberofcircuits] - # if newRBSdataset.circuit_depths is not None: - # newRBSdataset.circuit_depths[i] = newRBSdataset.circuit_depths[i][:numberofcircuits] - # if newRBSdataset.circuit_twoQgate_counts is not None: - # newRBSdataset.circuit_twoQgate_counts[i] = newRBSdataset.circuit_twoQgate_counts[i][:numberofcircuits] - - # return newRBSdataset diff --git a/pygsti/extras/rb/io.py b/pygsti/extras/rb/io.py index a21b306f2..24e1201cd 100644 --- a/pygsti/extras/rb/io.py +++ b/pygsti/extras/rb/io.py @@ -734,79 +734,3 @@ def write_rb_summary_data_to_file(ds, filename): f.write(dataline + '\n') return - - -# # todo update this. -# def import_rb_summary_data(filenames, numqubits, type='auto', verbosity=1): -# """ -# todo : redo -# Reads in one or more text files of summary RB data into a RBSummaryDataset object. This format -# is appropriate for using the RB analysis functions. The datafile(s) should have one of the -# following two formats: - -# Format 1 (`is_counts_data` is True): - -# # The number of qubits -# The number of qubits (this line is optional if `num_qubits` is specified) -# # RB length // Success counts // Total counts // Circuit depth // Circuit two-qubit gate count -# Between 3 and 5 columns of data (the last two columns are expected only if `contains_circuit_data` is True). - -# Format 2 (`is_counts_data` is False): - -# # The number of qubits -# The number of qubits (this line is optional if `num_qubits` is specified) -# # RB length // Survival probabilities // Circuit depth // Circuit two-qubit gate count -# Between 2 and 4 columns of data (the last two columns are expected only if `contains_circuit_data` is True). - -# Parameters -# ---------- -# filenames : str or list. -# The filename, or a list of filenams, where the data is stored. The data from all files is read -# into a *single* dataset, so normally it should all be data for a single RB experiment. - -# is_counts_data : bool, optional -# Whether the data to be read contains success counts data (True) or survival probability data (False). - -# contains_circuit_data : bool, optional. -# Whether the data counts summary circuit data. 
- -# finitesampling : bool, optional -# Records in the RBSummaryDataset whether the survival probability for each circuit was obtained -# from finite sampling of the outcome probabilities. This is there to, by default, warn the user -# that any finite sampling cannot be taken into account if the input is not counts data (when -# they run any analysis on the data). But it is useful to be able to set this to False for simulated -# data obtained from perfect outcome sampling. - -# num_qubits : int, optional. -# The number of qubits the data is for. Must be specified if this isn't in the input file. - -# total_counts : int, optional -# If the data is success probability data, the total counts can optional be input here. - -# verbosity : int, optional -# The amount of print-to-screen. - -# Returns -# ------- -# None -# """ - - -# # todo : update this. -# def write_rb_summary_data_to_file(RBSdataset, filename): -# """ -# Writes an RBSSummaryDataset to file, in the format that can be read back in by -# import_rb_summary_data(). - -# Parameters -# ---------- -# RBSdataset : RBSummaryDataset -# The data to write to file. - -# filename : str -# The filename where the dataset should be written. - -# Returns -# ------- -# None -# """ diff --git a/pygsti/modelmembers/povms/denseeffect.py b/pygsti/modelmembers/povms/denseeffect.py deleted file mode 100644 index b0deb1e68..000000000 --- a/pygsti/modelmembers/povms/denseeffect.py +++ /dev/null @@ -1,142 +0,0 @@ - - -#UNUSED - I think we can remove this -#class DensePOVMEffect(_POVMEffect): -# """ -# A POVM effect vector that behaves like a numpy array. -# -# This class is the common base class for parameterizations of an effect vector -# that have a dense representation and can be accessed like a numpy array. -# -# Parameters -# ---------- -# vec : numpy.ndarray -# The effect vector as a dense numpy array. -# -# evotype : EvoType -# The evolution type. -# -# Attributes -# ---------- -# _base_1d : numpy.ndarray -# Direct access to the underlying 1D array. -# -# base : numpy.ndarray -# Direct access the the underlying data as column vector, -# i.e, a (dim,1)-shaped array. -# """ -# -# def __init__(self, vec, evotype): -# #dtype = complex if evotype == "statevec" else 'd' -# vec = _np.asarray(vec, dtype='d') -# vec.shape = (vec.size,) # just store 1D array flatten -# vec = _np.require(vec, requirements=['OWNDATA', 'C_CONTIGUOUS']) -# evotype = _Evotype.cast(evotype) -# rep = evotype.create_dense_effect_rep(vec) -# super(DensePOVMEffect, self).__init__(rep, evotype) -# assert(self._base_1d.flags['C_CONTIGUOUS'] and self._base_1d.flags['OWNDATA']) -# -# def to_dense(self, scratch=None): -# """ -# Return this effect vector as a (dense) numpy array. -# -# The memory in `scratch` maybe used when it is not-None. -# -# Parameters -# ---------- -# scratch : numpy.ndarray, optional -# scratch space available for use. -# -# Returns -# ------- -# numpy.ndarray -# """ -# #don't use scratch since we already have memory allocated -# return self._base_1d # *must* be a numpy array for Cython arg conversion -# -# @property -# def _base_1d(self): -# """ -# Direct access to the underlying 1D array. -# """ -# return self._rep.base -# -# @property -# def base(self): -# """ -# Direct access the the underlying data as column vector, i.e, a (dim,1)-shaped array. 
-# """ -# bv = self._base_1d.view() -# bv.shape = (bv.size, 1) # 'base' is by convention a (N,1)-shaped array -# return bv -# -# def __copy__(self): -# # We need to implement __copy__ because we defer all non-existing -# # attributes to self.base (a numpy array) which *has* a __copy__ -# # implementation that we don't want to use, as it results in just a -# # copy of the numpy array. -# cls = self.__class__ -# cpy = cls.__new__(cls) -# cpy.__dict__.update(self.__dict__) -# return cpy -# -# def __deepcopy__(self, memo): -# # We need to implement __deepcopy__ because we defer all non-existing -# # attributes to self.base (a numpy array) which *has* a __deepcopy__ -# # implementation that we don't want to use, as it results in just a -# # copy of the numpy array. -# cls = self.__class__ -# cpy = cls.__new__(cls) -# memo[id(self)] = cpy -# for k, v in self.__dict__.items(): -# setattr(cpy, k, _copy.deepcopy(v, memo)) -# return cpy -# -# #Access to underlying array -# def __getitem__(self, key): -# self.dirty = True -# return self.base.__getitem__(key) -# -# def __getslice__(self, i, j): -# self.dirty = True -# return self.__getitem__(slice(i, j)) # Called for A[:] -# -# def __setitem__(self, key, val): -# self.dirty = True -# return self.base.__setitem__(key, val) -# -# def __getattr__(self, attr): -# #use __dict__ so no chance for recursive __getattr__ -# if '_rep' in self.__dict__: # sometimes in loading __getattr__ gets called before the instance is loaded -# ret = getattr(self.base, attr) -# else: -# raise AttributeError("No attribute:", attr) -# self.dirty = True -# return ret -# -# #Mimic array -# def __pos__(self): return self.base -# def __neg__(self): return -self.base -# def __abs__(self): return abs(self.base) -# def __add__(self, x): return self.base + x -# def __radd__(self, x): return x + self.base -# def __sub__(self, x): return self.base - x -# def __rsub__(self, x): return x - self.base -# def __mul__(self, x): return self.base * x -# def __rmul__(self, x): return x * self.base -# def __truediv__(self, x): return self.base / x -# def __rtruediv__(self, x): return x / self.base -# def __floordiv__(self, x): return self.base // x -# def __rfloordiv__(self, x): return x // self.base -# def __pow__(self, x): return self.base ** x -# def __eq__(self, x): return self.base == x -# def __len__(self): return len(self.base) -# def __int__(self): return int(self.base) -# def __long__(self): return int(self.base) -# def __float__(self): return float(self.base) -# def __complex__(self): return complex(self.base) -# -# def __str__(self): -# s = "%s with dimension %d\n" % (self.__class__.__name__, self.dim) -# s += _mt.mx_to_string(self.to_dense(), width=4, prec=2) -# return s diff --git a/pygsti/modelpacks/stdtarget.py b/pygsti/modelpacks/stdtarget.py index 20f3587f8..9da24f5ae 100644 --- a/pygsti/modelpacks/stdtarget.py +++ b/pygsti/modelpacks/stdtarget.py @@ -42,127 +42,6 @@ def _get_cachefile_names(std_module, param_type, simulator, py_version): raise ValueError("No cache files used for param-type=%s" % param_type) -# XXX is this used? 
-# def _make_hs_cache_for_std_model(std_module, term_order, max_length, json_too=False, comm=None): -# """ -# A utility routine to for creating the term-based cache files for a standard module -# """ -# target_model = std_module.target_model() -# prep_fiducials = std_module.prepStrs -# effect_fiducials = std_module.effectStrs -# germs = std_module.germs -# -# x = 1 -# maxLengths = [] -# while(x <= max_length): -# maxLengths.append(x) -# x *= 2 -# -# listOfExperiments = _stdlists.create_lsgst_circuits( -# target_model, prep_fiducials, effect_fiducials, germs, maxLengths) -# -# mdl_terms = target_model.copy() -# mdl_terms.set_all_parameterizations("H+S terms") # CPTP terms? -# my_calc_cache = {} -# mdl_terms.sim = _TermFSim(mode="taylor", max_order=term_order, cache=my_calc_cache) -# -# comm_method = "scheduler" -# if comm is not None and comm.Get_size() > 1 and comm_method == "scheduler": -# from mpi4py import MPI # just needed for MPI.SOURCE below -# -# #Alternate: use rank0 as "scheduler" -# rank = 0 if (comm is None) else comm.Get_rank() -# nprocs = 1 if (comm is None) else comm.Get_size() -# N = len(listOfExperiments); cur_index = 0; active_workers = nprocs - 1 -# buf = _np.zeros(1, _np.int64) # use buffer b/c mpi4py .send/.recv seem buggy -# if rank == 0: -# # ** I am the scheduler ** -# # Give each "worker" rank an initial index to compute -# for i in range(1, nprocs): -# if cur_index == N: # there are more procs than items - just send -1 index to mean "you're done!" -# buf[0] = -1 -# comm.Send(buf, dest=i, tag=1) # tag == 1 => scheduler to worker -# active_workers -= 1 -# else: -# buf[0] = cur_index -# comm.Send(buf, dest=i, tag=1); cur_index += 1 -# -# # while there are active workers keep dishing out indices -# while active_workers > 0: -# comm.Recv(buf, source=MPI.ANY_SOURCE, tag=2) # worker requesting assignment -# worker_rank = buf[0] -# if cur_index == N: # nothing more to do: just send -1 index to mean "you're done!" 
-# buf[0] = -1 -# comm.Send(buf, dest=worker_rank, tag=1) # tag == 1 => scheduler to worker -# active_workers -= 1 -# else: -# buf[0] = cur_index -# comm.Send(buf, dest=worker_rank, tag=1) -# cur_index += 1 -# -# else: -# # ** I am a worker ** -# comm.Recv(buf, source=0, tag=1) -# index_to_compute = buf[0] -# -# while index_to_compute >= 0: -# print("Worker %d computing prob %d of %d" % (rank, index_to_compute, N)) -# t0 = _time.time() -# mdl_terms.probabilities(listOfExperiments[index_to_compute]) -# print("Worker %d finished computing prob %d in %.2fs" % (rank, index_to_compute, _time.time() - t0)) -# -# buf[0] = rank -# comm.Send(buf, dest=0, tag=2) # tag == 2 => worker requests next assignment -# comm.Recv(buf, source=0, tag=1) -# index_to_compute = buf[0] -# -# print("Rank %d at barrier" % rank) -# comm.barrier() # wait here until all workers and scheduler are done -# -# else: -# -# #divide up strings among ranks -# my_expList, _, _ = _mpit.distribute_indices(listOfExperiments, comm, False) -# rankStr = "" if (comm is None) else "Rank%d: " % comm.Get_rank() -# -# if comm is not None and comm.Get_rank() == 0: -# print("%d circuits divided among %d processors" % (len(listOfExperiments), comm.Get_size())) -# -# t0 = _time.time() -# for i, opstr in enumerate(my_expList): -# print("%s%.2fs: Computing prob %d of %d" % (rankStr, _time.time() - t0, i, len(my_expList))) -# mdl_terms.probabilities(opstr) -# #mdl_terms.bulk_probs(my_expList) # also fills cache, but allocs more mem at once -# -# py_version = 3 if (_sys.version_info > (3, 0)) else 2 -# key_fn, val_fn = _get_cachefile_names(std_module, "H+S terms", -# "termorder:%d" % term_order, py_version) -# _write_calccache(my_calc_cache, key_fn, val_fn, json_too, comm) -# -# if comm is None or comm.Get_rank() == 0: -# print("Completed in %.2fs" % (_time.time() - t0)) -# print("Num of Experiments = ", len(listOfExperiments)) -# -# #if comm is None: -# # calcc_list = [ my_calc_cache ] -# #else: -# # calcc_list = comm.gather(my_calc_cache, root=0) -# # -# #if comm is None or comm.Get_rank() == 0: -# # calc_cache = {} -# # for c in calcc_list: -# # calc_cache.update(c) -# # -# # print("Completed in %.2fs" % (_time.time()-t0)) -# # print("Cachesize = ",len(calc_cache)) -# # print("Num of Experiments = ", len(listOfExperiments)) -# # -# # py_version = 3 if (_sys.version_info > (3, 0)) else 2 -# # key_fn, val_fn = _get_cachefile_names(std_module, "H+S terms", -# # "termorder:%d" % term_order,py_version) -# # _write_calccache(calc_cache, key_fn, val_fn, json_too, comm) - - # XXX apparently only used from _make_hs_cache_for_std_model which itself looks unused def _write_calccache(calc_cache, key_fn, val_fn, json_too=False, comm=None): """ diff --git a/pygsti/protocols/vb.py b/pygsti/protocols/vb.py index 95f9d87e4..cecb7bfd5 100644 --- a/pygsti/protocols/vb.py +++ b/pygsti/protocols/vb.py @@ -622,17 +622,6 @@ def _get_circuit_values(icirc, circ, dsrow, idealout): return self._compute_dict(data, self.circuit_statistics, _get_circuit_values, for_passes="first") - # def compute_dscmp_data(self, data, dscomparator): - - # def get_dscmp_values(icirc, circ, dsrow, idealout): - # ret = {'tvds': dscomparator.tvds.get(circ, _np.nan), - # 'pvals': dscomparator.pVals.get(circ, _np.nan), - # 'jsds': dscomparator.jsds.get(circ, _np.nan), - # 'llrs': dscomparator.llrs.get(circ, _np.nan)} - # return ret - - # return self.compute_dict(data, "dscmpdata", self.dsmp_statistics, get_dscmp_values, for_passes="none") - def _compute_predicted_probs(self, data, model): """ 
Compute the predicted success probabilities of `model` given `data`. @@ -1041,180 +1030,3 @@ def _my_attributes_as_nameddict(self): "SummaryStatisticsResults.statistics dict should be populated with NamedDicts, not %s" % str(type(v)) stats[k] = v return stats - - -#BDB = ByDepthBenchmark -#VBGrid = VolumetricBenchmarkGrid -#VBResults = VolumetricBenchmarkingResults # shorthand - -#Add something like this? -#class PassStabilityTest(_proto.Protocol): -# pass - -# Commented out as we are not using this currently. todo: revive or delete this in the future. -# class VolumetricBenchmarkGrid(Benchmark): -# """ A protocol that creates an entire depth vs. width grid of volumetric benchmark values """ - -# def __init__(self, depths='all', widths='all', datatype='success_probabilities', -# paths='all', statistic='mean', aggregate=True, rescaler='auto', -# dscomparator=None, name=None): - -# super().__init__(name) -# self.postproc = VolumetricBenchmarkGridPP(depths, widths, datatype, paths, statistic, aggregate, self.name) -# self.dscomparator = dscomparator -# self.rescaler = rescaler - -# self.auxfile_types['postproc'] = 'protocolobj' -# self.auxfile_types['dscomparator'] = 'pickle' -# self.auxfile_types['rescaler'] = 'reset' # punt for now - fix later - -# def run(self, data, memlimit=None, comm=None): -# #Since we know that VolumetricBenchmark protocol objects Create a single results just fill -# # in data under the result object's 'volumetric_benchmarks' and 'failure_counts' -# # keys, and these are indexed by width and depth (even though each VolumetricBenchmark -# # only contains data for a single width), we can just "merge" the VB results of all -# # the underlying by-depth datas, so long as they're all for different widths. - -# #Then run resulting data normally, giving a results object -# # with "top level" dicts correpsonding to different paths -# VB = ByDepthBenchmark(self.postproc.depths, self.postproc.datatype, self.postproc.statistic, -# self.rescaler, self.dscomparator, name=self.name) -# separate_results = _proto.SimpleRunner(VB).run(data, memlimit, comm) -# pp_results = self.postproc.run(separate_results, memlimit, comm) -# pp_results.protocol = self -# return pp_results - - -# Commented out as we are not using this currently. todo: revive this in the future. -# class VolumetricBenchmark(_proto.ProtocolPostProcessor): -# """ A postprocesor that constructs a volumetric benchmark from existing results. 
""" - -# def __init__(self, depths='all', widths='all', datatype='polarization', -# statistic='mean', paths='all', edesigntype=None, aggregate=True, -# name=None): - -# super().__init__(name) -# self.depths = depths -# self.widths = widths -# self.datatype = datatype -# self.paths = paths if paths == 'all' else sorted(paths) # need to ensure paths are grouped by common prefix -# self.statistic = statistic -# self.aggregate = aggregate -# self.edesigntype = edesigntype - -# def run(self, results, memlimit=None, comm=None): -# data = results.data -# paths = results.get_tree_paths() if self.paths == 'all' else self.paths -# #Note: above won't work if given just a results object - needs a dir - -# #Process results -# #Merge/flatten the data from different paths into one depth vs width grid -# passnames = list(data.passes.keys()) if data.is_multipass() else [None] -# passresults = [] -# for passname in passnames: -# vb = _tools.NamedDict('Depth', 'int', None, None) -# fails = _tools.NamedDict('Depth', 'int', None, None) -# path_for_gridloc = {} -# for path in paths: -# #TODO: need to be able to filter based on widths... - maybe replace .update calls -# # with something more complicated when width != 'all' -# #print("Aggregating path = ", path) #TODO - show progress something like this later? - -# #Traverse path to get to root of VB data -# root = results -# for key in path: -# root = root[key] -# root = root.for_protocol.get(self.name, None) -# if root is None: continue - -# if passname: # then we expect final Results are MultiPassResults -# root = root.passes[passname] # now root should be a BenchmarkingResults -# assert(isinstance(root, VolumetricBenchmarkingResults)) -# if self.edesigntype is None: -# assert(isinstance(root.data.edesign, ByDepthDesign)), \ -# "All paths must lead to by-depth exp. design, not %s!" % str(type(root.data.edesign)) -# else: -# if not isinstance(root.data.edsign, self.edesigntype): -# continue - -# #Get the list of depths we'll extract from this (`root`) sub-results -# depths = root.data.edesign.depths if (self.depths == 'all') else \ -# filter(lambda d: d in self.depths, root.data.edesign.depths) -# width = len(root.data.edesign.qubit_labels) # sub-results contains only a single width -# if self.widths != 'all' and width not in self.widths: continue # skip this one - -# for depth in depths: -# if depth not in vb: # and depth not in fails -# vb[depth] = _tools.NamedDict('Width', 'int', 'Value', 'float') -# fails[depth] = _tools.NamedDict('Width', 'int', 'Value', None) -# path_for_gridloc[depth] = {} # just used for meaningful error message - -# if width in path_for_gridloc[depth]: -# raise ValueError(("Paths %s and %s both give data for depth=%d, width=%d! 
Set the `paths`" -# " argument of this VolumetricBenchmarkGrid to avoid this.") % -# (str(path_for_gridloc[depth][width]), str(path), depth, width)) - -# vb[depth][width] = root.volumetric_benchmarks[depth][width] -# fails[depth][width] = root.failure_counts[depth][width] -# path_for_gridloc[depth][width] = path - -# if self.statistic in ('minmin', 'maxmax') and not self.aggregate: -# self._update_vb_minmin_maxmax(vb) # aggregate now since we won't aggregate over passes - -# #Create Results -# results = VolumetricBenchmarkingResults(data, self) -# results.volumetric_benchmarks = vb -# results.failure_counts = fails -# passresults.append(results) - -# agg_fn = _get_statistic_function(self.statistic) - -# if self.aggregate and len(passnames) > 1: # aggregate pass data into a single set of qty dicts -# agg_vb = _tools.NamedDict('Depth', 'int', None, None) -# agg_fails = _tools.NamedDict('Depth', 'int', None, None) -# template = passresults[0].volumetric_benchmarks # to get widths and depths - -# for depth, template_by_width_data in template.items(): -# agg_vb[depth] = _tools.NamedDict('Width', 'int', 'Value', 'float') -# agg_fails[depth] = _tools.NamedDict('Width', 'int', 'Value', None) - -# for width in template_by_width_data.keys(): -# # ppd = "per pass data" -# vb_ppd = [r.volumetric_benchmarks[depth][width] for r in passresults] -# fail_ppd = [r.failure_counts[depth][width] for r in passresults] - -# successcount = 0 -# failcount = 0 -# for (successcountpass, failcountpass) in fail_ppd: -# successcount += successcountpass -# failcount += failcountpass -# agg_fails[depth][width] = (successcount, failcount) - -# if self.statistic == 'dist': -# agg_vb[depth][width] = [item for sublist in vb_ppd for item in sublist] -# else: -# agg_vb[depth][width] = agg_fn(vb_ppd) - -# aggregated_results = VolumetricBenchmarkingResults(data, self) -# aggregated_results.volumetric_benchmarks = agg_vb -# aggregated_results.failure_counts = agg_fails - -# if self.statistic in ('minmin', 'maxmax'): -# self._update_vb_minmin_maxmax(aggregated_results.qtys['volumetric_benchmarks']) -# return aggregated_results # replace per-pass results with aggregated results -# elif len(passnames) > 1: -# multipass_results = _proto.MultiPassResults(data, self) -# multipass_results.passes.update({passname: r for passname, r in zip(passnames, passresults)}) -# return multipass_results -# else: -# return passresults[0] - -# def _update_vb_minmin_maxmax(self, vb): -# for d in vb.keys(): -# for w in vb[d].keys(): -# for d2 in vb.keys(): -# for w2 in vb[d2].keys(): -# if self.statistic == 'minmin' and d2 <= d and w2 <= w and vb[d2][w2] < vb[d][w]: -# vb[d][w] = vb[d2][w2] -# if self.statistic == 'maxmax' and d2 >= d and w2 >= w and vb[d2][w2] > vb[d][w]: -# vb[d][w] = vb[d2][w2] diff --git a/pygsti/report/workspaceplots.py b/pygsti/report/workspaceplots.py index 2022be44f..ff1af4ed4 100644 --- a/pygsti/report/workspaceplots.py +++ b/pygsti/report/workspaceplots.py @@ -2165,12 +2165,6 @@ def _mx_fn_driftpv(plaq, x, y, instabilityanalyzertuple): def _mx_fn_drifttvd(plaq, x, y, instabilityanalyzertuple): return _ph.drift_maxtvd_matrices(plaq, instabilityanalyzertuple) -# future: delete this, or update it and added it back in. 
-# def _mx_fn_driftpwr(plaq, x, y, driftresults): -# return _ph.drift_maxpower_matrices(plaq, driftresults) - -# Begin "Additional sub-matrix" functions for adding more info to hover text - def _outcome_to_str(x): # same function as in writers.py if isinstance(x, str): return x @@ -3942,411 +3936,3 @@ def _create(self, rb_r, fitkey, decay, success_probabilities, ylim, xlim, #reverse order of data so z-ordering is nicer return ReportFigure(go.Figure(data=list(data), layout=layout), None, pythonVal) - - -#This older version on an RB decay plot contained a lot more theory detail -# compared with the current one - so we'll keep it around (commented out) -# in case we want to steal/revive pieces of it in the future. -#class OLDRandomizedBenchmarkingPlot(WorkspacePlot): -# """ Plot of RB Decay curve """ -# def __init__(self, ws, rb_r,xlim=None, ylim=None, -# fit='standard', Magesan_zeroth=False, Magesan_first=False, -# exact_decay=False,L_matrix_decay=False, Magesan_zeroth_SEB=False, -# Magesan_first_SEB=False, L_matrix_decay_SEB=False,mdl=False, -# target_model=False,group=False, group_to_model=None, norm='1to1', legend=True, -# title='Randomized Benchmarking Decay', scale=1.0): -# """ -# Plot RB decay curve, as a function of some the sequence length -# computed using the `gstyp` gate-label-set. -# -# Parameters -# ---------- -# rb_r : RBResults -# The RB results object containing all the relevant RB data. -# -# gstyp : str, optional -# The gate-label-set specifying which translation (i.e. strings with -# which operation labels) to use when computing sequence lengths. -# -# xlim : tuple, optional -# The x-range as (xmin,xmax). -# -# ylim : tuple, optional -# The y-range as (ymin,ymax). -# -# save_fig_path : str, optional -# If not None, the filename where the resulting plot should be saved. -# -# fitting : str, optional -# Allowed values are 'standard', 'first order' or 'all'. Specifies -# whether the zeroth or first order fitting model results are plotted, -# or both. -# -# Magesan_zeroth : bool, optional -# If True, plots the decay predicted by the 'zeroth order' theory of Magesan -# et al. PRA 85 042311 2012. Requires mdl and target_model to be specified. -# -# Magesan_first : bool, optional -# If True, plots the decay predicted by the 'first order' theory of Magesan -# et al. PRA 85 042311 2012. Requires mdl and target_model to be specified. -# -# Magesan_zeroth_SEB : bool, optional -# If True, plots the systematic error bound for the 'zeroth order' theory -# predicted decay. This is the region around the zeroth order decay in which -# the exact RB average survival probabilities are guaranteed to fall. -# -# Magesan_first_SEB : bool, optional -# As above, but for 'first order' theory. -# -# exact_decay : bool, optional -# If True, plots the exact RB decay, as predicted by the 'R matrix' theory -# of arXiv:1702.01853. Requires mdl and group to be specified -# -# L_matrix_decay : bool, optional -# If True, plots the RB decay, as predicted by the approximate 'L matrix' -# theory of arXiv:1702.01853. Requires mdl and target_model to be specified. -# -# L_matrix_decay_SEB : bool, optional -# If True, plots the systematic error bound for approximate 'L matrix' -# theory of arXiv:1702.01853. This is the region around predicted decay -# in which the exact RB average survival probabilities are guaranteed -# to fall. -# -# mdl : model, optional -# Required, if plotting any of the theory decays. The model for which -# these decays should be plotted for. 
-# -# target_model : Model, optional -# Required, if plotting certain theory decays. The target model for which -# these decays should be plotted for. -# -# group : MatrixGroup, optional -# Required, if plotting R matrix theory decay. The matrix group that mdl -# is an implementation of. -# -# group_to_model : dict, optional -# If not None, a dictionary that maps labels of group elements to labels -# of mdl. Only used if subset_sampling is not None. If subset_sampling is -# not None and the mdl and group elements have the same labels, this dictionary -# is not required. Otherwise it is necessary. -# -# norm : str, optional -# The norm used for calculating the Magesan theory bounds. -# -# legend : bool, optional -# Specifies whether a legend is added to the graph -# -# title : str, optional -# Specifies a title for the graph -# -# Returns -# ------- -# None -# """ -# # loc : str, optional -# # Specifies the location of the legend. -# super(RandomizedBenchmarkingPlot,self).__init__( -# ws, self._create, rb_r, xlim, ylim, fit, Magesan_zeroth, -# Magesan_first, exact_decay, L_matrix_decay, Magesan_zeroth_SEB, -# Magesan_first_SEB, L_matrix_decay_SEB, mdl, target_model, group, -# group_to_model, norm, legend, title, scale) -# -# def _create(self, rb_r, xlim, ylim, fit, Magesan_zeroth, -# Magesan_first, exact_decay, L_matrix_decay, Magesan_zeroth_SEB, -# Magesan_first_SEB, L_matrix_decay_SEB, mdl, target_model, group, -# group_to_model, norm, legend, title, scale): -# -# from ..extras.rb import rbutils as _rbutils -# #TODO: maybe move the computational/fitting part of this function -# # back to the RBResults object to reduce the logic (and dependence -# # on rbutils) here. -# -# #newplot = _plt.figure(figsize=(8, 4)) -# #newplotgca = newplot.gca() -# -# # Note: minus one to get xdata that discounts final Clifford-inverse -# xdata = _np.asarray(rb_r.results['lengths']) - 1 -# ydata = _np.asarray(rb_r.results['successes']) -# A = rb_r.results['A'] -# B = rb_r.results['B'] -# f = rb_r.results['f'] -# if fit == 'first order': -# C = rb_r.results['C'] -# pre_avg = rb_r.pre_avg -# -# if (Magesan_zeroth_SEB is True) and (Magesan_zeroth is False): -# print("As Magesan_zeroth_SEB is True, Setting Magesan_zeroth to True\n") -# Magesan_zeroth = True -# if (Magesan_first_SEB is True) and (Magesan_first is False): -# print("As Magesan_first_SEB is True, Setting Magesan_first to True\n") -# Magesan_first = True -# -# if (Magesan_zeroth is True) or (Magesan_first is True): -# if (mdl is False) or (target_model is False): -# raise ValueError("To plot Magesan et al theory decay curves a model" + -# " and a target model is required.") -# else: -# MTP = _rbutils.Magesan_theory_parameters(mdl, target_model, -# success_outcomelabel=rb_r.success_outcomelabel, -# norm=norm,d=rb_r.d) -# f_an = MTP['p'] -# A_an = MTP['A'] -# B_an = MTP['B'] -# A1_an = MTP['A1'] -# B1_an = MTP['B1'] -# C1_an = MTP['C1'] -# delta = MTP['delta'] -# -# if exact_decay is True: -# if (mdl is False) or (group is False): -# raise ValueError("To plot the exact decay curve a model" + -# " and the target group are required.") -# else: -# mvalues,ASPs = _rbutils.exact_rb_asps(mdl,group,max(xdata),m_min=1,m_step=1, -# d=rb_r.d, group_to_model=group_to_model, -# success_outcomelabel=rb_r.success_outcomelabel) -# -# if L_matrix_decay is True: -# if (mdl is False) or (target_model is False): -# raise ValueError("To plot the L matrix theory decay curve a model" + -# " and a target model is required.") -# else: -# mvalues, LM_ASPs, LM_ASPs_SEB_lower, 
LM_ASPs_SEB_upper = \ -# _rbutils.L_matrix_asps(mdl,target_model,max(xdata),m_min=1,m_step=1,d=rb_r.d, -# success_outcomelabel=rb_r.success_outcomelabel, error_bounds=True) -# -# xlabel = 'Sequence length' -# -# data = [] # list of traces -# data.append( go.Scatter( -# x = xdata, y = ydata, -# mode = 'markers', -# marker = dict( -# color = "rgb(0,0,0)", -# size = 6 if pre_avg else 3 -# ), -# name = 'Averaged RB data' if pre_avg else 'RB data', -# )) -# -# if fit=='standard' or fit=='first order': -# fit_label_1='Fit' -# fit_label_2='Fit' -# color2 = "black" -# -# theory_color2 = "green" -# theory_fill2 = "rgba(0,128,0,0.1)" -# if Magesan_zeroth is True and Magesan_first is True: -# theory_color2 = "magenta" -# theory_fill2 = "rgba(255,0,255,0.1)" -# -# if fit=='standard': -# data.append( go.Scatter( -# x = _np.arange(max(xdata)), -# y = _rbutils.standard_fit_function(_np.arange(max(xdata)),A,B,f), -# mode = 'lines', -# line = dict(width=1, color="black"), -# name = fit_label_1, -# showlegend=legend, -# )) -# -# if fit=='first order': -# data.append( go.Scatter( -# x = _np.arange(max(xdata)), -# y = _rbutils.first_order_fit_function(_np.arange(max(xdata)),A,B,C,f), -# mode = 'lines', -# line = dict(width=1, color=color2), -# name = fit_label_2, -# showlegend=legend, -# )) -# -# if Magesan_zeroth is True: -# data.append( go.Scatter( -# x = _np.arange(max(xdata)), -# y = _rbutils.standard_fit_function(_np.arange(max(xdata)),A_an,B_an,f_an), -# mode = 'lines', -# line = dict(width=2, color="green", dash='dash'), -# name = '0th order theory', -# showlegend=legend, -# )) -# -# if Magesan_zeroth_SEB is True: -# data.append( go.Scatter( -# x = _np.arange(max(xdata)), -# y = _rbutils.seb_upper( -# _rbutils.standard_fit_function(_np.arange(max(xdata)),A_an,B_an,f_an), -# _np.arange(max(xdata)), delta, order='zeroth'), -# mode = 'lines', -# line = dict(width=0.5, color="green"), -# name = '0th order bound', -# fill='tonexty', -# fillcolor='rgba(0,128,0,0.1)', -# showlegend=False, -# )) -# data.append( go.Scatter( -# x = _np.arange(max(xdata)), -# y = _rbutils.seb_lower( -# _rbutils.standard_fit_function(_np.arange(max(xdata)),A_an,B_an,f_an), -# _np.arange(max(xdata)), delta, order='zeroth'), -# mode = 'lines', -# line = dict(width=0.5, color="green"), -# name = '0th order bound', -# showlegend=False, -# )) -# -# -# if Magesan_first is True: -# data.append( go.Scatter( -# x = _np.arange(max(xdata)), -# y = _rbutils.first_order_fit_function(_np.arange(max(xdata)),A1_an,B1_an,C1_an,f_an), -# mode = 'lines', -# line = dict(width=2, color=theory_color2, dash='dash'), -# name = '1st order theory', -# showlegend=legend, -# )) -# -# if Magesan_first_SEB is True: -# data.append( go.Scatter( -# x = _np.arange(max(xdata)), -# y = _rbutils.seb_upper( -# _rbutils.first_order_fit_function(_np.arange(max(xdata)),A1_an,B1_an,C1_an,f_an), -# _np.arange(max(xdata)), delta, order='first'), -# mode = 'lines', -# line = dict(width=0.5, color=theory_color2), #linewidth=4? 
-# name = '1st order bound', -# fill='tonexty', -# fillcolor=theory_fill2, -# showlegend=False, -# )) -# data.append( go.Scatter( -# x = _np.arange(max(xdata)), -# y = _rbutils.seb_lower( -# _rbutils.first_order_fit_function(_np.arange(max(xdata)),A1_an,B1_an,C1_an,f_an), -# _np.arange(max(xdata)), delta, order='first'), -# mode = 'lines', -# line = dict(width=0.5, color=theory_color2), -# name = '1st order bound', -# showlegend=False, -# )) -# -# -# if exact_decay is True: -# data.append( go.Scatter( -# x = mvalues, -# y = ASPs, -# mode = 'lines', -# line = dict(width=2, color="blue",dash='dash'), -# name = 'Exact decay', -# showlegend=legend, -# )) -# -# if L_matrix_decay is True: -# data.append( go.Scatter( -# x = mvalues, -# y = LM_ASPs, -# mode = 'lines', -# line = dict(width=2, color="cyan",dash='dash'), -# name = 'L matrix decay', -# showlegend=legend, -# )) -# if L_matrix_decay_SEB is True: -# data.append( go.Scatter( -# x = mvalues, -# y = LM_ASPs_SEB_upper, -# mode = 'lines', -# line = dict(width=0.5, color="cyan"), -# name = 'LM bound', -# fill='tonexty', -# fillcolor='rgba(0,255,255,0.1)', -# showlegend=False, -# )) -# data.append( go.Scatter( -# x = mvalues, -# y = LM_ASPs_SEB_lower, -# mode = 'lines', -# line = dict(width=0.5, color="cyan"), -# name = 'LM bound', -# showlegend=False, -# )) -# -# ymin = min([min(trace['y']) for trace in data]) -# ymin -= 0.1*abs(1.0-ymin) #pad by 10% -# -# layout = go.Layout( -# width=800*scale, -# height=400*scale, -# title=title, -# titlefont=dict(size=16), -# xaxis=dict( -# title=xlabel, -# titlefont=dict(size=14), -# range=xlim if xlim else [0,max(xdata)], -# ), -# yaxis=dict( -# title='Mean survival probability', -# titlefont=dict(size=14), -# range=ylim if ylim else [ymin,1.0], -# ), -# legend=dict( -# font=dict( -# size=13, -# ), -# ) -# ) -# -# pythonVal = {} -# for i,tr in enumerate(data): -# key = tr['name'] if ("name" in tr) else "trace%d" % i -# pythonVal[key] = {'x': tr['x'], 'y': tr['y']} -# -# #reverse order of data so z-ordering is nicer -# return ReportFigure(go.Figure(data=list(reversed(data)), layout=layout), -# None, pythonVal) -# -# #newplotgca.set_xlabel(xlabel, fontsize=15) -# #newplotgca.set_ylabel('Mean survival probability',fontsize=15) -# #if title==True: -# # newplotgca.set_title('Randomized Benchmarking Decay', fontsize=18) -# #newplotgca.set_frame_on(True) -# #newplotgca.yaxis.grid(False) -# #newplotgca.tick_params(axis='x', top='off', labelsize=12) -# #newplotgca.tick_params(axis='y', left='off', right='off', labelsize=12) -# -# #if legend==True: -# # leg = _plt.legend(fancybox=True, loc=loc) -# # leg.get_frame().set_alpha(0.9) -# -# #newplotgca.spines["top"].set_visible(False) -# #newplotgca.spines["right"].set_visible(False) -# #newplotgca.spines["bottom"].set_alpha(.7) -# #newplotgca.spines["left"].set_alpha(.7) - - -#Histograms?? -#TODO: histogram -# if histogram: -# fig = _plt.figure() -# histdata = subMxSums.flatten() -# #take gives back (1,N) shaped array (why?) 
-# histdata_finite = _np.take(histdata, _np.where(_np.isfinite(histdata)))[0] -# histMin = min( histdata_finite ) if cmapFactory.vmin is None else cmapFactory.vmin -# histMax = max( histdata_finite ) if cmapFactory.vmax is None else cmapFactory.vmax -# _plt.hist(_np.clip(histdata_finite,histMin,histMax), histBins, -# range=[histMin, histMax], facecolor='gray', align='mid') -# if save_to is not None: -# if len(save_to) > 0: -# _plt.savefig( _makeHistFilename(save_to) ) -# _plt.close(fig) - -# if histogram: -# fig = _plt.figure() -# histdata = _np.concatenate( [ sub_mxs[iy][ix].flatten() for ix in range(nXs) for iy in range(nYs)] ) -# #take gives back (1,N) shaped array (why?) -# histdata_finite = _np.take(histdata, _np.where(_np.isfinite(histdata)))[0] -# histMin = min( histdata_finite ) if cmapFactory.vmin is None else cmapFactory.vmin -# histMax = max( histdata_finite ) if cmapFactory.vmax is None else cmapFactory.vmax -# _plt.hist(_np.clip(histdata_finite,histMin,histMax), histBins, -# range=[histMin, histMax], facecolor='gray', align='mid') -# if save_to is not None: -# if len(save_to) > 0: -# _plt.savefig( _makeHistFilename(save_to) ) -# _plt.close(fig) diff --git a/pygsti/tools/pdftools.py b/pygsti/tools/pdftools.py index c73e27dc4..57a3e65de 100644 --- a/pygsti/tools/pdftools.py +++ b/pygsti/tools/pdftools.py @@ -71,28 +71,3 @@ def classical_fidelity(p, q): # sqrt_fidelity += _np.sqrt(x * y) return _np.sum([_np.sqrt(x * q.get(event, 0.)) for (event, x) in p.items()]) ** 2 - - #return root_fidelity ** 2 - - -# def Hoyer_sparsity_measure(p, n): -# """ -# Computes a measure of the sparsity ("spikyness") of a probability distribution (or a -# general real vector). - -# Parameters -# ---------- -# p : dict -# The distribution - -# n : the number of possible events (zero probability events do not need to be included in `p`) - -# Returns -# ------- -# float -# """ -# plist = _np.array(list(p.values())) -# twonorm = _np.sqrt(_np.sum(plist**2)) -# onenorm = _np.sum(_np.abs(plist)) -# max_onenorm_over_twonorm = _np.sqrt(n) -# return (max_onenorm_over_twonorm - onenorm/twonorm) / (max_onenorm_over_twonorm - 1) diff --git a/pygsti/tools/rbtheory.py b/pygsti/tools/rbtheory.py index 6ea77d8b4..7191bbb03 100644 --- a/pygsti/tools/rbtheory.py +++ b/pygsti/tools/rbtheory.py @@ -458,255 +458,6 @@ def R_matrix(model, group, group_to_model=None, weights=None): # noqa N802 return R -### COMMENTED OUT SO THAT THIS FILE DOESN'T NEED "from .. import construction as _cnst". -### THIS SHOULD BE ADDED BACK IN AT SOME POINT. -# def exact_rb_asps(model, group, m_max, m_min=0, m_step=1, success_outcomelabel=('0',), -# group_to_model=None, weights=None, compilation=None, group_twirled=False): -# """ -# Calculates the exact RB average success probablilites (ASP). - -# Uses some generalizations of the formula given Proctor et al -# Phys. Rev. Lett. 119, 130502 (2017). This formula does not scale well with -# group size and qubit number, and for the Clifford group it is likely only -# practical for a single qubit. - -# Parameters -# ---------- -# model : Model -# The noisy model (e.g., the Cliffords) to calculate the R matrix of. -# The correpsonding `target` model (not required in this function) -# must be equal to or a subset of (a faithful rep of) the group `group`. -# If group_to_model is None, the labels of the gates in model should be -# the same as the labels of the corresponding group elements in `group`. 
-# For Clifford RB `model` should be the clifford model; for direct RB -# this should be the native model. - -# group : MatrixGroup -# The group that the `model` model contains gates from. For Clifford RB -# or direct RB, this would be the Clifford group. - -# m_max : int -# The maximal sequence length of the random gates, not including the -# inversion gate. - -# m_min : int, optional -# The minimal sequence length. Defaults to the smallest valid value of 0. - -# m_step : int, optional -# The step size between sequence lengths. Defaults to the smallest valid -# value of 1. - -# success_outcomelabel : str or tuple, optional -# The outcome label associated with success. - -# group_to_model : dict, optional -# If not None, a dictionary that maps labels of group elements to labels -# of model. This is required if the labels of the gates in `model` are different -# from the labels of the corresponding group elements in `group`. - -# weights : dict, optional -# If not None, a dictionary of floats, whereby the keys are the gates in model -# and the values are the unnormalized probabilities to apply each gate at -# for each layer of the RB protocol. If None, the weighting defaults to an -# equal weighting on all gates, as used in most RB protocols (e.g., Clifford -# RB). - -# compilation : dict, optional -# If `model` is not the full group `group` (with the same labels), then a -# compilation for the group elements, used to implement the inversion gate -# (and the initial randomgroup element, if `group_twirled` is True). This -# is a dictionary with the group labels as keys and a gate sequence of the -# elements of `model` as values. - -# group_twirled : bool, optional -# If True, the random sequence starts with a single uniformly random group -# element before the m random elements of `model`. - -# Returns -# ------- -# m : float -# Array of sequence length values that the ASPs have been calculated for. - -# P_m : float -# Array containing ASP values for the specified sequence length values. -# """ -# if compilation is None: -# for key in list(model.operations.keys()): -# assert(key in group.labels), "Gates labels are not in `group`, so `compilation must be specified." -# for label in group.labels: -# assert(label in list(model.operations.keys()) -# ), "Some group elements not in `model`, so `compilation must be specified." 
- -# i_max = _np.floor((m_max - m_min) / m_step).astype('int') -# m = _np.zeros(1 + i_max, _np.int64) -# P_m = _np.zeros(1 + i_max, float) -# group_dim = len(group) -# R = R_matrix(model, group, group_to_model=group_to_model, weights=weights) -# success_prepLabel = list(model.preps.keys())[0] # just take first prep -# success_effectLabel = success_outcomelabel[-1] if isinstance(success_outcomelabel, tuple) \ -# else success_outcomelabel -# extended_E = _np.kron(_mtls.column_basis_vector(0, group_dim).T, model.povms['Mdefault'][success_effectLabel].T) -# extended_rho = _np.kron(_mtls.column_basis_vector(0, group_dim), model.preps[success_prepLabel]) - -# if compilation is None: -# extended_E = group_dim * _np.dot(extended_E, R) -# if group_twirled is True: -# extended_rho = _np.dot(R, extended_rho) -# else: -# full_model = _cnst.create_explicit_alias_model(model, compilation) -# R_fullgroup = R_matrix(full_model, group) -# extended_E = group_dim * _np.dot(extended_E, R_fullgroup) -# if group_twirled is True: -# extended_rho = _np.dot(R_fullgroup, extended_rho) - -# Rstep = _np.linalg.matrix_power(R, m_step) -# Riterate = _np.linalg.matrix_power(R, m_min) -# for i in range(0, 1 + i_max): -# m[i] = m_min + i * m_step -# P_m[i] = _np.dot(extended_E, _np.dot(Riterate, extended_rho)) -# Riterate = _np.dot(Rstep, Riterate) - -# return m, P_m - -### COMMENTED OUT SO THAT THIS FILE DOESN'T NEED "from .. import construction as _cnst" -### THIS SHOULD BE ADDED BACK IN AT SOME POINT. -# def L_matrix_asps(model, target_model, m_max, m_min=0, m_step=1, success_outcomelabel=('0',), # noqa N802 -# compilation=None, group_twirled=False, weights=None, gauge_optimize=True, -# return_error_bounds=False, norm='diamond'): -# """ -# Computes RB average survival probablities, as predicted by the 'L-matrix' theory. - -# This theory was introduced in Proctor et al Phys. Rev. Lett. 119, 130502 -# (2017). Within the function, the model is gauge-optimized to target_model. This is -# *not* optimized to the gauge specified by Proctor et al, but instead performs the -# standard pyGSTi gauge-optimization (using the frobenius distance). In most cases, -# this is likely to be a reasonable proxy for the gauge optimization perscribed by -# Proctor et al. - -# Parameters -# ---------- -# model : Model -# The noisy model. - -# target_model : Model -# The target model. - -# m_max : int -# The maximal sequence length of the random gates, not including the inversion gate. - -# m_min : int, optional -# The minimal sequence length. Defaults to the smallest valid value of 0. - -# m_step : int, optional -# The step size between sequence lengths. - -# success_outcomelabel : str or tuple, optional -# The outcome label associated with success. - -# compilation : dict, optional -# If `model` is not the full group, then a compilation for the group elements, -# used to implement the inversion gate (and the initial random group element, -# if `group_twirled` is True). This is a dictionary with the group labels as -# keys and a gate sequence of the elements of `model` as values. - -# group_twirled : bool, optional -# If True, the random sequence starts with a single uniformly random group -# element before the m random elements of `model`. - -# weights : dict, optional -# If not None, a dictionary of floats, whereby the keys are the gates in model -# and the values are the unnormalized probabilities to apply each gate at -# for each layer of the RB protocol. 
If None, the weighting defaults to an -# equal weighting on all gates, as used in most RB protocols (e.g., Clifford -# RB). - -# gauge_optimize : bool, optional -# If True a gauge-optimization to the target model is implemented before -# calculating all quantities. If False, no gauge optimization is performed. -# Whether or not a gauge optimization is performed does not affect the rate of -# decay but it will generally affect the exact form of the decay. E.g., if a -# perfect model is given to the function -- but in the "wrong" gauge -- no -# decay will be observed in the output P_m, but the P_m can be far from 1 (even -# for perfect SPAM) for all m. The gauge optimization is optional, as it is -# not guaranteed to always improve the accuracy of the reported P_m, although when -# gauge optimization is performed this limits the possible deviations of the -# reported P_m from the true P_m. - -# return_error_bounds : bool, optional -# Sets whether or not to return error bounds for how far the true ASPs can deviate -# from the values returned by this function. - -# norm : str, optional -# The norm used in the error bound calculation. Either 'diamond' for the diamond -# norm (the default) or '1to1' for the Hermitian 1 to 1 norm. - -# Returns -# ------- -# m : float -# Array of sequence length values that the ASPs have been calculated for. -# P_m : float -# Array containing predicted ASP values for the specified sequence length values. -# if error_bounds is True : -# lower_bound: float -# Array containing lower bounds on the possible ASP values - -# upper_bound: float -# Array containing upper bounds on the possible ASP values -# """ -# d = int(round(_np.sqrt(model.dim))) - -# if gauge_optimize: -# model_go = _algs.gaugeopt_to_target(model, target_model) -# else: -# model_go = model.copy() -# L = L_matrix(model_go, target_model, weights=weights) -# success_prepLabel = list(model.preps.keys())[0] # just take first prep -# success_effectLabel = success_outcomelabel[-1] if isinstance(success_outcomelabel, tuple) \ -# else success_outcomelabel -# identity_vec = _mtls.vec(_np.identity(d**2, float)) - -# if compilation is not None: -# model_group = _cnst.create_explicit_alias_model(model_go, compilation) -# model_target_group = _cnst.create_explicit_alias_model(target_model, compilation) -# delta = gate_dependence_of_errormaps(model_group, model_target_group, norm=norm) -# emaps = errormaps(model_group, model_target_group) -# E_eff = _np.dot(model_go.povms['Mdefault'][success_effectLabel].T, emaps.operations['Gavg']) - -# if group_twirled is True: -# L_group = L_matrix(model_group, model_target_group) - -# if compilation is None: -# delta = gate_dependence_of_errormaps(model_go, target_model, norm=norm) -# emaps = errormaps(model_go, target_model) -# E_eff = _np.dot(model_go.povms['Mdefault'][success_effectLabel].T, emaps.operations['Gavg']) - -# i_max = _np.floor((m_max - m_min) / m_step).astype('int') -# m = _np.zeros(1 + i_max, _np.int64) -# P_m = _np.zeros(1 + i_max, float) -# upper_bound = _np.zeros(1 + i_max, float) -# lower_bound = _np.zeros(1 + i_max, float) - -# Lstep = _np.linalg.matrix_power(L, m_step) -# Literate = _np.linalg.matrix_power(L, m_min) -# for i in range(0, 1 + i_max): -# m[i] = m_min + i * m_step -# if group_twirled: -# L_m_rdd = _mtls.unvec(_np.dot(L_group, _np.dot(Literate, identity_vec))) -# else: -# L_m_rdd = _mtls.unvec(_np.dot(Literate, identity_vec)) -# P_m[i] = _np.dot(E_eff, _np.dot(L_m_rdd, model_go.preps[success_prepLabel])) -# Literate = _np.dot(Lstep, Literate) -# 
upper_bound[i] = P_m[i] + delta / 2 -# lower_bound[i] = P_m[i] - delta / 2 -# if upper_bound[i] > 1: -# upper_bound[i] = 1. -# if lower_bound[i] < 0: -# lower_bound[i] = 0. -# if return_error_bounds: -# return m, P_m, lower_bound, upper_bound -# else: -# return m, P_m - def errormaps(model, target_model): """ From 5aa5cb7466ae87b239f7fd56248227613ff9c852 Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Thu, 24 Oct 2024 09:05:03 -0400 Subject: [PATCH 39/41] add back commented-out function per request from Corey --- pygsti/algorithms/germselection.py | 74 ++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/pygsti/algorithms/germselection.py b/pygsti/algorithms/germselection.py index 6588a5879..94c156c01 100644 --- a/pygsti/algorithms/germselection.py +++ b/pygsti/algorithms/germselection.py @@ -3295,7 +3295,81 @@ def symmetric_low_rank_spectrum_update(update, orig_e, U, proj_U, force_rank_inc #return the new eigenvalues return new_evals, True +# Note: Th function below won't work for our purposes because of the assumptions +# about the rank of the update on the nullspace of the matrix we're updating, +# but keeping this here commented for future reference. +''' +def riedel_style_inverse_trace(update, orig_e, U, proj_U, force_rank_increase=True): + """ + input: + + update : ndarray + symmetric low-rank update to perform. + This is the first half the symmetric rank decomposition s.t. + update@update.T= the full update matrix. + + orig_e : ndarray + Spectrum of the original matrix. This is a 1-D array. + + proj_U : ndarray + Projector onto the complement of the column space of the + original matrix's eigenvectors. + + output: + + trace : float + Value of the trace of the updated psuedoinverse matrix. + + updated_rank : int + total rank of the updated matrix. + + rank_increase_flag : bool + a flag that is returned to indicate is a candidate germ failed to amplify additional parameters. + This indicates things short circuited and so the scoring function should skip this germ. + """ + #First we need to for the matrix P, whose column space + #forms an orthonormal basis for the component of update + #that is in the complement of U. + + proj_update= proj_U@update + + #Next take the RRQR decomposition of this matrix: + q_update, r_update, _ = _sla.qr(proj_update, mode='economic', pivoting=True) + + #Construct P by taking the columns of q_update corresponding to non-zero values of r_A on the diagonal. + nonzero_indices_update= _np.nonzero(_np.diag(r_update)>1e-10) #HARDCODED (threshold is hardcoded) + + #if the rank doesn't increase then we can't use the Riedel approach. + #Abort early and return a flag to indicate the rank did not increase. + if len(nonzero_indices_update[0])==0 and force_rank_increase: + return None, None, False + + P= q_update[: , nonzero_indices_update[0]] + + updated_rank= len(orig_e)+ len(nonzero_indices_update[0]) + + #Now form the matrix R_update which is given by P.T @ proj_update. 
+ R_update= P.T@proj_update + + #R_update gets concatenated with U.T@update to form + #a block column matrixblock_column= np.concatenate([U.T@update, R_update], axis=0) + + Uta= U.T@update + + try: + RRRDinv= R_update@_np.linalg.inv(R_update.T@R_update) + except _np.linalg.LinAlgError as err: + print('Numpy thinks this matrix is singular, condition number is: ', _np.linalg.cond(R_update.T@R_update)) + print((R_update.T@R_update).shape) + raise err + pinv_orig_e_mat= _np.diag(1/orig_e) + + trace= _np.sum(1/orig_e) + _np.trace( RRRDinv@(_np.eye(Uta.shape[1]) + Uta.T@pinv_orig_e_mat@Uta)@RRRDinv.T ) + + return trace, updated_rank, True +''' + def minamide_style_inverse_trace(update, orig_e, U, proj_U, force_rank_increase=False): """ This function performs a low-rank update to the components of From 344670dd12a98c750bf87e0d4877749018a03e74 Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Thu, 24 Oct 2024 09:25:03 -0400 Subject: [PATCH 40/41] revert out-of-scope and unnecessary change --- pygsti/extras/interpygate/process_tomography.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pygsti/extras/interpygate/process_tomography.py b/pygsti/extras/interpygate/process_tomography.py index 42908777e..fba79adb6 100644 --- a/pygsti/extras/interpygate/process_tomography.py +++ b/pygsti/extras/interpygate/process_tomography.py @@ -86,7 +86,7 @@ def unvec_square(vectorized, order): n = int(_np.sqrt(max(vectorized.shape))) if len(vectorized) == n ** 2: - return vectorized.reshape(shape=(n, n), order=order) + return vectorized.reshape((n, n), order=order) else: msg = 'The input vector length must be a perfect square, but this input has length %d.' % len(vectorized) raise ValueError(msg) From 00982b291c80a972790cf09dfaa4cf556ad1d9da Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Thu, 24 Oct 2024 10:14:14 -0400 Subject: [PATCH 41/41] restore fogitools to state on develop --- pygsti/tools/fogitools.py | 219 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 219 insertions(+) diff --git a/pygsti/tools/fogitools.py b/pygsti/tools/fogitools.py index ec8fcc374..bbc8bed1c 100644 --- a/pygsti/tools/fogitools.py +++ b/pygsti/tools/fogitools.py @@ -732,6 +732,41 @@ def resolve_norm_order(vecs_to_normalize, label_lists, given_norm_order): return (fogi_dirs, fogi_meta, dep_fogi_dirs, dep_fogi_meta) +#def create_fogi_dir_labels(fogi_opsets, fogi_dirs, fogi_rs, fogi_gaugespace_dirs, errorgen_coefficients): +# +# fogi_names = [] +# fogi_abbrev_names = [] +# +# # Note: fogi_dirs is a 2D array, so .T to iterate over cols, whereas fogi_gaugespace_dirs +# # is a list of vectors, so just iterating is fine. 
+# for opset, fogi_dir, fogi_epsilon in zip(fogi_opsets, fogi_dirs.T, fogi_gaugespace_dirs): +# +# if len(opset) == 1: # Intrinsic quantity +# assert(fogi_epsilon is None) +# op_elemgen_labels = errorgen_coefficient_labels[op_label] +# errgen_name = elem_vec_name(fogi_dir, op_elemgen_labels) +# errgen_names_abbrev = elem_vec_names(local_fogi_dirs, op_elemgen_labels, include_type=False) +# fogi_names.extend(["%s_%s" % ((("(%s)" % egname) if (' ' in egname) else egname), +# op_label_abbrevs.get(op_label, str(op_label))) +# for egname in errgen_names]) +# fogi_abbrev_names.extend(errgen_names_abbrev) +# +# intersection_space_to_add = _np.take(intersection_space, rel_cols_to_add, axis=1) +# #intersection_space_to_add = _np.dot(gauge_linear_combos, indep_intersection_space) \ +# # if (gauge_linear_combos is not None) else intersection_space_to_add +# +# +# +# +# intersection_names = elem_vec_names(intersection_space_to_add, gauge_elemgen_labels) +# intersection_names_abbrev = elem_vec_names(intersection_space_to_add, gauge_elemgen_labels, +# include_type=False) +# fogi_names.extend(["ga(%s)_%s - ga(%s)_%s" % ( +# iname, "|".join([op_label_abbrevs.get(l, str(l)) for l in existing_set]), +# iname, op_label_abbrevs.get(op_label, str(op_label))) for iname in intersection_names]) +# fogi_abbrev_names.extend(["ga(%s)" % iname for iname in intersection_names_abbrev]) + + def compute_maximum_relational_errors(primitive_op_labels, errorgen_coefficients, gauge_action_matrices, errorgen_coefficient_bases_by_op, gauge_basis, model_dim): """ TODO: docstring """ @@ -830,6 +865,190 @@ def _create_errgen_op(vec, list_of_mxs): return ret +#An alternative but inferior algorithm for constructing FOGI quantities: Keep around for checking/reference or REMOVE? +#def _compute_fogi_via_nullspaces(self, primitive_op_labels, ham_basis, other_basis, other_mode="all", +# ham_gauge_linear_combos=None, other_gauge_linear_combos=None, +# op_label_abbrevs=None, reduce_to_model_space=True): +# num_ham_elem_errgens = (len(ham_basis) - 1) +# num_other_elem_errgens = (len(other_basis) - 1)**2 if other_mode == "all" else (len(other_basis) - 1) +# ham_elem_labels = [('H', bel) for bel in ham_basis.labels[1:]] +# other_elem_labels = [('S', bel) for bel in other_basis.labels[1:]] if other_mode != "all" else \ +# [('S', bel1, bel2) for bel1 in other_basis.labels[1:] for bel2 in other_basis.labels[1:]] +# assert(len(ham_elem_labels) == num_ham_elem_errgens) +# assert(len(other_elem_labels) == num_other_elem_errgens) +# +# #Get lists of the present (existing within the model) labels for each operation +# ham_labels_for_op = {op_label: ham_elem_labels[:] for op_label in primitive_op_labels} # COPY lists! 
+# other_labels_for_op = {op_label: other_elem_labels[:] for op_label in primitive_op_labels} # ditto +# if reduce_to_model_space: +# for op_label in primitive_op_labels: +# op = self.operations[op_label] +# lbls = op.errorgen_coefficient_labels() +# present_ham_elem_lbls = set(filter(lambda lbl: lbl[0] == 'H', lbls)) +# present_other_elem_lbls = set(filter(lambda lbl: lbl[0] == 'S', lbls)) +# +# disallowed_ham_space_labels = set(ham_elem_labels) - present_ham_elem_lbls +# disallowed_row_indices = [ham_elem_labels.index(disallowed_lbl) +# for disallowed_lbl in disallowed_ham_space_labels] +# for i in sorted(disallowed_row_indices, reverse=True): +# del ham_labels_for_op[op_label][i] +# +# disallowed_other_space_labels = set(other_elem_labels) - present_other_elem_lbls +# disallowed_row_indices = [other_elem_labels.index(disallowed_lbl) +# for disallowed_lbl in disallowed_other_space_labels] +# for i in sorted(disallowed_row_indices, reverse=True): +# del other_labels_for_op[op_label][i] +# +# #Step 1: construct nullspaces associated with sets of operations +# ham_nullspaces = {} +# other_nullspaces = {} +# max_size = len(primitive_op_labels) +# for set_size in range(1, max_size + 1): +# ham_nullspaces[set_size] = {} # dict mapping operation-sets of `set_size` to nullspaces +# other_nullspaces[set_size] = {} +# +# for op_set in _itertools.combinations(primitive_op_labels, set_size): +# #print(op_set) +# ham_gauge_action_mxs = [] +# other_gauge_action_mxs = [] +# ham_rows_by_op = {}; h_off = 0 +# other_rows_by_op = {}; o_off = 0 +# for op_label in op_set: # Note: "ga" stands for "gauge action" in variable names below +# op = self.operations[op_label] +# if isinstance(op, _op.LindbladOp): +# op_mx = op.unitary_postfactor.to_dense() +# else: +# assert(False), "STOP - you probably don't want to do this!" +# op_mx = op.to_dense() +# U = _bt.change_basis(op_mx, self.basis, 'std') +# ham_ga = _gt.first_order_ham_gauge_action_matrix(U, ham_basis) +# other_ga = _gt.first_order_other_gauge_action_matrix(U, other_basis, other_mode) +# +# if ham_gauge_linear_combos is not None: +# ham_ga = _np.dot(ham_ga, ham_gauge_linear_combos) +# if other_gauge_linear_combos is not None: +# other_ga = _np.dot(other_ga, other_gauge_linear_combos) +# +# ham_gauge_action_mxs.append(ham_ga) +# other_gauge_action_mxs.append(other_ga) +# reduced_ham_nrows = len(ham_labels_for_op[op_label]) # ham_ga.shape[0] when unrestricted +# reduced_other_nrows = len(other_labels_for_op[op_label]) # other_ga.shape[0] when unrestricted +# ham_rows_by_op[op_label] = slice(h_off, h_off + reduced_ham_nrows); h_off += reduced_ham_nrows +# other_rows_by_op[op_label] = slice(o_off, o_off + reduced_other_nrows); o_off += reduced_other_nrows +# assert(ham_ga.shape[0] == num_ham_elem_errgens) +# assert(other_ga.shape[0] == num_other_elem_errgens) +# +# #Stack matrices to form "base" gauge action matrix for op_set +# ham_ga_mx = _np.concatenate(ham_gauge_action_mxs, axis=0) +# other_ga_mx = _np.concatenate(other_gauge_action_mxs, axis=0) +# +# # Intersect gauge action with the space of elementary errorgens present in the model. +# # We may need to eliminate some rows of X_ga matrices, and (only) keep linear combos +# # of the columns that are zero on these rows. 
+# present_ham_elem_lbls = set() +# present_other_elem_lbls = set() +# for op_label in op_set: +# op = self.operations[op_label] +# lbls = op.errorgen_coefficient_labels() # length num_coeffs +# present_ham_elem_lbls.update([(op_label, lbl) for lbl in lbls if lbl[0] == 'H']) +# present_other_elem_lbls.update([(op_label, lbl) for lbl in lbls if lbl[0] == 'S']) +# +# full_ham_elem_labels = [(op_label, elem_lbl) for op_label in op_set +# for elem_lbl in ham_elem_labels] +# assert(present_ham_elem_lbls.issubset(full_ham_elem_labels)), \ +# "The given space of hamiltonian elementary gauge-gens must encompass all those in model ops!" +# disallowed_ham_space_labels = set(full_ham_elem_labels) - present_ham_elem_lbls +# disallowed_row_indices = [full_ham_elem_labels.index(disallowed_lbl) +# for disallowed_lbl in disallowed_ham_space_labels] +# +# if reduce_to_model_space and len(disallowed_row_indices) > 0: +# #disallowed_rows = _np.take(ham_ga_mx, disallowed_row_indices, axis=0) +# #allowed_linear_combos = _mt.nice_nullspace(disallowed_rows, tol=1e-4) +# #ham_ga_mx = _np.dot(ham_ga_mx, allowed_linear_combos) +# ham_ga_mx = _np.delete(ham_ga_mx, disallowed_row_indices, axis=0) +# +# full_other_elem_labels = [(op_label, elem_lbl) for op_label in op_set +# for elem_lbl in other_elem_labels] +# assert(present_other_elem_lbls.issubset(full_other_elem_labels)), \ +# "The given space of 'other' elementary gauge-gens must encompass all those in model ops!" +# disallowed_other_space_labels = set(full_other_elem_labels) - present_other_elem_lbls +# disallowed_row_indices = [full_other_elem_labels.index(disallowed_lbl) +# for disallowed_lbl in disallowed_other_space_labels] +# +# if reduce_to_model_space and len(disallowed_row_indices) > 0: +# #disallowed_rows = _np.take(other_ga_mx, disallowed_row_indices, axis=0) +# #allowed_linear_combos = _mt.nice_nullspace(disallowed_rows, tol=1e-4) +# #other_ga_mx = _np.dot(other_ga_mx, allowed_linear_combos) +# other_ga_mx = _np.delete(other_ga_mx, disallowed_row_indices, axis=0) +# +# #Add all known (already tabulated) nullspace directions so that we avoid getting them again +# # when we compute the nullspace of the gauge action matrix below. +# for previous_size in range(1, set_size + 1): # include current size! 
+# for previous_op_set, (nullsp, previous_rows) in ham_nullspaces[previous_size].items(): +# padded_nullsp = _np.zeros((ham_ga_mx.shape[0], nullsp.shape[1]), 'd') +# for op in previous_op_set: +# if op not in ham_rows_by_op: continue +# padded_nullsp[ham_rows_by_op[op], :] = nullsp[previous_rows[op], :] +# ham_ga_mx = _np.concatenate((ham_ga_mx, padded_nullsp), axis=1) +# +# for previous_op_set, (nullsp, previous_rows) in other_nullspaces[previous_size].items(): +# padded_nullsp = _np.zeros((other_ga_mx.shape[0], nullsp.shape[1]), other_ga_mx.dtype) +# for op in previous_op_set: +# if op not in other_rows_by_op: continue +# padded_nullsp[other_rows_by_op[op], :] = nullsp[previous_rows[op], :] +# other_ga_mx = _np.concatenate((other_ga_mx, padded_nullsp), axis=1) +# +# #Finally, compute the nullspace of the resulting gauge-action + already-tallied matrix: +# nullspace = _mt.nice_nullspace(ham_ga_mx.T) +# ham_nullspaces[set_size][op_set] = (nullspace, ham_rows_by_op) +# #DEBUG: print(" NULLSP DIM = ",nullspace.shape[1]) +# #DEBUG: labels = [(op_label, elem_lbl) for op_label in op_set +# #DEBUG: for elem_lbl in ham_labels_for_op[op_label]] +# #DEBUG: print("\n".join(fogi_names(nullspace, labels, op_label_abbrevs))) +# +# nullspace = _mt.nice_nullspace(other_ga_mx.T) +# other_nullspaces[set_size][op_set] = (nullspace, other_rows_by_op) +# +# # Step 2: convert these per-operation-set nullspaces into vectors over a single "full" +# # space of all the elementary error generators (as given by ham_basis, other_basis, & other_mode) +# +# # Note: "full" designation is for space of all elementary error generators as given by their +# # supplied ham_basis, other_basis, and other_mode. +# +# # Construct full-space vectors for each nullspace vector found by crawling through +# # the X_nullspaces dictionary and embedding values as needed. +# ham_rows_by_op = {}; off = 0 +# for op_label in primitive_op_labels: +# ham_rows_by_op[op_label] = slice(off, off + len(ham_labels_for_op[op_label])) +# off += len(ham_labels_for_op[op_label]) +# full_ham_fogi_vecs = _np.empty((off, 0), 'd') +# for size in range(1, max_size + 1): +# for op_set, (nullsp, op_set_rows) in ham_nullspaces[size].items(): +# padded_nullsp = _np.zeros((full_ham_fogi_vecs.shape[0], nullsp.shape[1]), 'd') +# for op in op_set: +# padded_nullsp[ham_rows_by_op[op], :] = nullsp[op_set_rows[op], :] +# full_ham_fogi_vecs = _np.concatenate((full_ham_fogi_vecs, padded_nullsp), axis=1) +# +# other_rows_by_op = {}; off = 0 +# for op_label in primitive_op_labels: +# other_rows_by_op[op_label] = slice(off, off + len(other_labels_for_op[op_label])) +# off += len(other_labels_for_op[op_label]) +# full_other_fogi_vecs = _np.empty((off, 0), complex) +# for size in range(1, max_size + 1): +# for op_set, (nullsp, op_set_rows) in other_nullspaces[size].items(): +# padded_nullsp = _np.zeros((full_other_fogi_vecs.shape[0], nullsp.shape[1]), complex) +# for op in op_set: +# padded_nullsp[other_rows_by_op[op], :] = nullsp[op_set_rows[op], :] +# full_other_fogi_vecs = _np.concatenate((full_other_fogi_vecs, padded_nullsp), axis=1) +# +# assert(_np.linalg.matrix_rank(full_ham_fogi_vecs) == full_ham_fogi_vecs.shape[1]) +# assert(_np.linalg.matrix_rank(full_other_fogi_vecs) == full_other_fogi_vecs.shape[1]) +# +# # Returns the vectors of FOGI (first order gauge invariant) linear combos as well +# # as lists of labels for the columns & rows, respectively. 
+# return (full_ham_fogi_vecs, ham_labels_for_op), (full_other_fogi_vecs, other_labels_for_op) + + def op_elem_vec_name(vec, elem_op_labels, op_label_abbrevs): name = "" for i, (op_lbl, elem_lbl) in enumerate(elem_op_labels):