text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def all_hermitian(self):
    """
    Check whether every basis operator is hermitian.

    The result is computed once and cached on the instance.
    """
    if self._all_hermitian is not None:
        return self._all_hermitian
    _log.debug("Testing and caching if all basis operator are hermitian")
    self._all_hermitian = all(is_hermitian(op) for op in self.ops)
    return self._all_hermitian
def product(self, *bases):
    """
    Compute the tensor product of this basis with one or more other bases.

    :param bases: One or more additional bases to form the product with.
    :return (OperatorBasis): The tensor product basis as an OperatorBasis object.
    """
    # fold the remaining bases right-to-left via recursion
    assert len(bases) >= 1
    if len(bases) == 1:
        basis_rest = bases[0]
    else:
        basis_rest = bases[0].product(*bases[1:])
    pairs = itertools.product(self, basis_rest)
    labels_ops = [(label_a + label_b, qt.tensor(op_a, op_b))
                  for (label_a, op_a), (label_b, op_b) in pairs]
    return OperatorBasis(labels_ops)
def super_basis(self):
    """
    Generate the superoperator basis in which the Choi matrix can be represented.

    The follows the definition in
    `Chow et al. <https://doi.org/10.1103/PhysRevLett.109.060501>`_

    :return (OperatorBasis): The super basis as an OperatorBasis object.
    """
    labels_ops = []
    for (label_n, op_n), (label_m, op_m) in itertools.product(self, self):
        labels_ops.append((label_n + "^T (x) " + label_m, qt.sprepost(op_m, op_n)))
    return OperatorBasis(labels_ops)
def project_op(self, op):
    """
    Project an operator onto this (orthonormal) basis.

    :param qutip.Qobj op: The operator to project.
    :return: The projection coefficients as a numpy array.
    :rtype: scipy.sparse.csr_matrix
    """
    if self.is_orthonormal():
        return self.basis_transform.H * qt.operator_to_vector(op).data
    raise ValueError("project_op only implemented for orthonormal operator bases")
def is_unitary(matrix: np.ndarray) -> bool:
    """
    A helper function that checks if a matrix is unitary.

    A matrix U is unitary iff it is square and U U^dagger equals the identity.

    :param matrix: a matrix to test unitarity of
    :return: true if and only if matrix is unitary
    """
    n_rows, n_cols = matrix.shape
    # non-square matrices can never be unitary
    if n_rows != n_cols:
        return False
    product = matrix.dot(matrix.conj().T)
    return np.allclose(np.eye(n_rows), product)
def most_significant_bit(lst: np.ndarray) -> int:
    """
    Find the position of the most significant bit in a 1darray of 1s and 0s,
    i.e. the first position where a 1 appears, reading left to right.

    :param lst: a 1d array of 0s and 1s with at least one 1
    :return: the first position in lst that a 1 appears
    """
    # Cast to int so callers receive a plain Python int rather than a numpy
    # scalar, matching the annotated return type.
    return int(np.argwhere(np.asarray(lst) == 1)[0][0])
def bitwise_xor(bs0: str, bs1: str) -> str:
    """
    A helper to calculate the bitwise XOR of two bit strings.

    :param bs0: String of 0's and 1's representing a number in binary representations
    :param bs1: String of 0's and 1's representing a number in binary representations
    :return: String of 0's and 1's representing the XOR between bs0 and bs1
    """
    n_bits = len(bs0)
    if n_bits != len(bs1):
        raise ValueError("Bit strings are not of equal length")
    xor_value = xor(int(bs0, 2), int(bs1, 2))
    # re-pad to the original width
    return PADDED_BINARY_BIT_STRING.format(xor_value, n_bits)
def numpart_qaoa(asset_list, A=1.0, minimizer_kwargs=None, steps=1):
    """
    Generate number partition driver and cost functions.

    :param asset_list: list to binary partition
    :param A: (float) optional constant for level separation. Default=1.
    :param minimizer_kwargs: Arguments for the QAOA minimizer
    :param steps: (int) number of steps approximating the solution.
    """
    cost_operators = []
    ref_operators = []
    for ii in range(len(asset_list)):
        for jj in range(ii + 1, len(asset_list)):
            # ZZ coupling; PauliTerm coefficients multiply, so the net weight
            # is 2 * A * asset_list[ii] * asset_list[jj]
            cost_operators.append(PauliSum([PauliTerm("Z", ii, 2*asset_list[ii]) *
                                            PauliTerm("Z", jj, A*asset_list[jj])]))
        # NOTE(review): one X driver term per qubit, appended at the outer-loop
        # level — confirm against upstream; the collapsed source is ambiguous here.
        ref_operators.append(PauliSum([PauliTerm("X", ii, -1.0)]))
    # constant identity offset on the cost Hamiltonian
    cost_operators.append(PauliSum([PauliTerm("I", 0, len(asset_list))]))
    if minimizer_kwargs is None:
        minimizer_kwargs = {'method': 'Nelder-Mead',
                            'options': {'ftol': 1.0e-2, 'xtol': 1.0e-2,
                                        'disp': True}}
    # one qubit per asset on a QVM sized to the problem
    qc = get_qc(f"{len(asset_list)}q-qvm")
    qaoa_inst = QAOA(qc, list(range(len(asset_list))), steps=steps,
                     cost_ham=cost_operators, ref_ham=ref_operators,
                     store_basis=True, minimizer=minimize,
                     minimizer_kwargs=minimizer_kwargs,
                     vqe_options={'disp': print})
    return qaoa_inst
def estimate_gradient(f_h: float, precision: int,
                      gradient_max: int = 1,
                      n_measurements: int = 50,
                      qc: QuantumComputer = None) -> float:
    """
    Estimate the gradient from the function evaluation at perturbation h.

    :param f_h: Oracle output at perturbation h.
    :param precision: Bit precision of gradient.
    :param gradient_max: OOM estimate of largest gradient value.
    :param n_measurements: Number of times to measure system.
    :param qc: The QuantumComputer object.
    :return: Decimal estimate of gradient.
    """
    # scale f_h by range of values gradient can take on
    scaled_f_h = f_h * (1. / gradient_max)

    # generate gradient program
    perturbation_sign = np.sign(scaled_f_h)
    p_gradient = gradient_program(scaled_f_h, precision)

    # run gradient program
    if qc is None:
        qc = get_qc(f"{len(p_gradient.get_qubits())}q-qvm")
    p_gradient.wrap_in_numshots_loop(n_measurements)
    executable = qc.compiler.native_quil_to_executable(p_gradient)
    measurements = qc.run(executable)

    # summarize measurements into a signed binary fraction
    bf_estimate = perturbation_sign * measurements_to_bf(measurements)
    deci_estimate = binary_float_to_decimal_float('{0:.16f}'.format(bf_estimate))

    # rescale gradient back to its original range
    return deci_estimate * gradient_max
def process_tomography_programs(process, qubits=None,
                                pre_rotation_generator=tomography.default_rotations,
                                post_rotation_generator=tomography.default_rotations):
    """
    Generator that yields tomographic sequences that wrap a process encoded by a QUIL program
    `proc` in tomographic rotations on the specified `qubits`.

    If `qubits is None`, it assumes all qubits in the program should be
    tomographically rotated.

    :param Program process: A Quil program
    :param list|NoneType qubits: The specific qubits for which to generate the tomographic
     sequences
    :param pre_rotation_generator: A generator that yields tomographic pre-rotations to perform.
    :param post_rotation_generator: A generator that yields tomographic post-rotations to perform.
    :return: Program for process tomography.
    :rtype: Program
    """
    if qubits is None:
        qubits = process.get_qubits()
    for pre_rotation in pre_rotation_generator(*qubits):
        for post_rotation in post_rotation_generator(*qubits):
            # sandwich the process between pre/post rotations inside a
            # PRESERVE_BLOCK so the compiler leaves the sequence intact
            tomo_program = Program(Pragma("PRESERVE_BLOCK"))
            tomo_program.inst(pre_rotation)
            tomo_program.inst(process)
            tomo_program.inst(post_rotation)
            tomo_program.inst(Pragma("END_PRESERVE_BLOCK"))
            yield tomo_program
def do_process_tomography(process, nsamples, cxn, qubits=None, use_run=False):
    """
    Perform a process tomography experiment and estimate the process.

    :param Program process: Process to execute.
    :param int nsamples: Number of samples to take for the program.
    :param QVMConnection|QPUConnection cxn: Connection on which to run the program.
    :param list qubits: List of qubits for the program.
     to use in the tomography analysis.
    :param bool use_run: If ``True``, use append measurements on all qubits and use ``cxn.run``
     instead of ``cxn.run_and_measure``.
    :return: The process tomogram
    :rtype: ProcessTomography
    """
    # delegate to the generic tomography driver with process-specific settings
    return tomography._do_tomography(
        process, nsamples, cxn, qubits,
        tomography.MAX_QUBITS_PROCESS_TOMO,
        ProcessTomography,
        process_tomography_programs,
        DEFAULT_PROCESS_TOMO_SETTINGS,
        use_run=use_run)
def estimate_from_ssr(histograms, readout_povm, pre_channel_ops, post_channel_ops, settings):
    """
    Estimate a quantum process from single shot histograms obtained by preparing specific input
    states and measuring bitstrings in the Z-eigenbasis after application of given channel
    operators.

    :param numpy.ndarray histograms: The single shot histograms.
    :param DiagonalPOVM readout_povm: The POVM corresponding to readout plus classifier.
    :param list pre_channel_ops: The input state preparation channels as `qutip.Qobj`'s.
    :param list post_channel_ops: The tomography post-process channels as `qutip.Qobj`'s.
    :param TomographySettings settings: The solver and estimation settings.
    :return: The ProcessTomography object and results from the the given data.
    :rtype: ProcessTomography
    """
    # number of qubits, inferred from the Hilbert-space dims of the first prep channel
    nqc = len(pre_channel_ops[0].dims[0])
    pauli_basis = grove.tomography.operator_utils.PAULI_BASIS ** nqc
    pi_basis = readout_povm.pi_basis

    if not histograms.shape[-1] == pi_basis.dim:  # pragma no coverage
        raise ValueError("Currently tomography is only implemented for two-level systems")

    # assumed perfect initialization in the n-qubit ground state
    rho0 = grove.tomography.operator_utils.n_qubit_ground_state(nqc)

    n_lkj = np.asarray(histograms)

    b_jkl_mn = _prepare_b_jkl_mn(readout_povm, pauli_basis, pre_channel_ops,
                                 post_channel_ops, rho0)

    # vectorized Pauli-transfer-matrix coefficients to be estimated
    r_mn = cvxpy.Variable(pauli_basis.dim ** 2)
    # predicted outcome probabilities are linear in r_mn
    p_jkl = b_jkl_mn.real * r_mn
    # negative log-likelihood of the observed histograms (to be minimized)
    obj = -np.matrix(n_lkj.ravel()) * cvxpy.log(p_jkl)

    # cvxpy has col-major order and we collapse k and l onto single dimension
    p_jkl_mat = cvxpy.reshape(p_jkl, pi_basis.dim,
                              len(pre_channel_ops) * len(post_channel_ops))

    # Default constraints:
    # MLE must describe valid probability distribution
    # i.e., for each k and l, p_jkl must sum to one and be element-wise non-negative:
    # 1. \sum_j p_jkl == 1 for all k, l
    # 2. p_jkl >= 0 for all j, k, l
    # where p_jkl = \sum_m b_jkl_mn r_mn
    constraints = [
        p_jkl >= 0,
        np.matrix(np.ones((1, pi_basis.dim))) * p_jkl_mat == 1,
    ]

    r_mn_mat = cvxpy.reshape(r_mn, pauli_basis.dim, pauli_basis.dim)
    super_pauli_basis = pauli_basis.super_basis()
    # assemble the real-embedded Choi matrix as a linear combination of super-basis ops
    choi_real_imag = sum((r_mn_mat[jj, kk] * o_ut.to_realimag(
        super_pauli_basis.ops[jj + kk * pauli_basis.dim])
                          for jj in range(pauli_basis.dim)
                          for kk in range(pauli_basis.dim)), 0)

    if COMPLETELY_POSITIVE in settings.constraints:
        if tomography._SDP_SOLVER.is_functional():
            # complete positivity <=> positive semidefinite Choi matrix
            constraints.append(choi_real_imag >> 0)
        else:  # pragma no coverage
            _log.warning("No convex solver capable of semi-definite problems installed.\n"
                         "Dropping the complete positivity constraint on the process")

    if TRACE_PRESERVING in settings.constraints:
        # trace preservation pins the first row of the transfer matrix
        constraints.append(r_mn_mat[0, 0] == 1)
        constraints.append(r_mn_mat[0, 1:] == 0)

    prob = cvxpy.Problem(cvxpy.Minimize(obj), constraints)
    _ = prob.solve(solver=tomography.SOLVER, **settings.solver_kwargs)
    # cvxpy stores the variable column-major; transpose restores row-major layout
    r_mn_est = r_mn.value.reshape((pauli_basis.dim, pauli_basis.dim)).transpose()

    return ProcessTomography(r_mn_est, pauli_basis, settings)
def process_fidelity(self, reference_unitary):
    """
    Compute the quantum process fidelity of the estimated state with respect to a unitary
    process.

    For non-sparse reference_unitary, this implementation this will be expensive in higher
    dimensions.

    :param (qutip.Qobj|matrix-like) reference_unitary: A unitary operator that induces a
     process as ``rho -> other*rho*other.dag()``, can also be a superoperator or Pauli-transfer
     matrix.
    :return: The process fidelity, a real number between 0 and 1.
    :rtype: float
    """
    if isinstance(reference_unitary, qt.Qobj):
        # accept either a plain unitary or an already-converted superoperator
        if reference_unitary.issuper and reference_unitary.superrep == "super":
            sother = reference_unitary
        else:
            sother = qt.to_super(reference_unitary)
        tm_other = self.pauli_basis.transfer_matrix(sother)
    else:
        tm_other = csr_matrix(reference_unitary)
    dimension = self.pauli_basis.ops[0].shape[0]
    overlap = np.trace(tm_other.T * self.r_est).real
    return overlap / dimension ** 2
def to_kraus(self):
    """
    Compute the Kraus operator representation of the estimated process.

    :return: The process as a list of Kraus operators.
    :rytpe: List[np.array]
    """
    kraus_ops = qt.to_kraus(self.sop)
    return [op.data.toarray() for op in kraus_ops]
def plot_pauli_transfer_matrix(self, ax):
    """
    Plot the elements of the Pauli transfer matrix.

    :param matplotlib.Axes ax: A matplotlib Axes object to plot into.
    """
    ut.plot_pauli_transfer_matrix(self.r_est, ax, self.pauli_basis.labels,
                                  "Estimated process")
def plot(self):
    """
    Visualize the process.

    :return: The generated figure.
    :rtype: matplotlib.Figure
    """
    fig, axes = plt.subplots(1, 1, figsize=(10, 8))
    self.plot_pauli_transfer_matrix(axes)
    return fig
def bit_reversal(qubits: List[int]) -> Program:
    """
    Generate a circuit to do bit reversal.

    :param qubits: Qubits to do bit reversal with.
    :return: A program to do bit reversal.
    """
    program = Program()
    half = len(qubits) // 2
    # swap mirrored pairs; the middle qubit (odd length) stays put
    for left, right in zip(qubits[:half], reversed(qubits)):
        program.inst(SWAP(left, right))
    return program
def qft(qubits: List[int]) -> Program:
    """
    Generate a program to compute the quantum Fourier transform on a set of qubits.

    :param qubits: A list of qubit indexes.
    :return: A Quil program to compute the Fourier transform of the qubits.
    """
    core = Program().inst(_core_qft(qubits, 1))
    # the core QFT leaves the output in bit-reversed order; undo that
    return core + bit_reversal(qubits)
def inverse_qft(qubits: List[int]) -> Program:
    """
    Generate a program to compute the inverse quantum Fourier transform on a set of qubits.

    :param qubits: A list of qubit indexes.
    :return: A Quil program to compute the inverse Fourier transform of the qubits.
    """
    # build the conjugated core (coefficient -1 negates the rotation angles) ...
    forward = Program().inst(_core_qft(qubits, -1))
    forward += bit_reversal(qubits)
    # ... then reverse the instruction order to obtain the inverse circuit
    result = Program()
    while len(forward) > 0:
        result.inst(forward.pop())
    return result
def unitary_operator(state_vector):
    """
    Uses QR factorization to create a unitary operator that can encode an arbitrary normalized
    vector into the wavefunction of a quantum state.

    Assumes that the state of the input qubits is to be expressed as

    .. math::

        (1, 0, \\ldots, 0)^T

    :param 1d array state_vector: Normalized vector whose length is at least two and a power
     of two.
    :return: Unitary operator that encodes state_vector
    :rtype: 2d array
    """
    if not np.allclose([np.linalg.norm(state_vector)], [1]):
        raise ValueError("Vector must be normalized")
    dim = len(state_vector)
    if 2 ** get_bits_needed(dim) != dim:
        raise ValueError("Vector length must be a power of two and at least two")

    # seed QR with the identity whose first column is the target state
    mat = np.identity(dim, dtype=complex)
    mat[:, 0] = state_vector
    unitary = np.linalg.qr(mat)[0]

    # make sure U|0> = |v>
    zero_state = np.zeros(dim)
    zero_state[0] = 1
    if np.allclose(unitary.dot(zero_state), state_vector):
        return unitary
    # QR is unique only up to phase; flip the global sign if needed
    return -1 * unitary
def fix_norm_and_length(vector):
    """
    Create a normalized and zero padded version of vector.

    :param 1darray vector: a vector with at least one nonzero component.
    :return: a vector that is the normalized version of vector,
     padded at the end with the smallest number of 0s necessary
     to make the length of the vector :math:`2^m`
     for some positive integer :math:`m`.
    :rtype: 1darray
    """
    # normalize
    norm_vector = vector / np.linalg.norm(vector)
    # pad with zeros up to the next power of two
    num_bits = get_bits_needed(len(vector))
    state_vector = np.zeros(2 ** num_bits, dtype=complex)
    # vectorized copy instead of an element-by-element Python loop
    state_vector[:len(vector)] = norm_vector
    return state_vector
def _create_bv_circuit(self, bit_map: Dict[str, str]) -> Program:
    """
    Implementation of the Bernstein-Vazirani Algorithm.

    Given a list of input qubits and an ancilla bit, all initially in the
    :math:`\\vert 0\\rangle` state, create a program that can find :math:`\\vec{a}` with one
    query to the given oracle.

    :param Dict[String, String] bit_map: truth-table of a function for Bernstein-Vazirani with
     the keys being all possible bit vectors strings and the values being the function values
    :rtype: Program
    """
    unitary, _ = self._compute_unitary_oracle_matrix(bit_map)
    circuit = Program()
    circuit.defgate("BV-ORACLE", unitary)

    # Put ancilla bit into minus state
    circuit.inst(X(self.ancilla), H(self.ancilla))

    # Hadamard all computational qubits, query the oracle once, then undo
    circuit.inst([H(q) for q in self.computational_qubits])
    oracle_qubits = sorted(self.computational_qubits + [self.ancilla], reverse=True)
    circuit.inst(tuple(["BV-ORACLE"] + oracle_qubits))
    circuit.inst([H(q) for q in self.computational_qubits])
    return circuit
def run(self, qc: QuantumComputer, bitstring_map: Dict[str, str]) -> 'BernsteinVazirani':
    """
    Runs the Bernstein-Vazirani algorithm.

    Given a connection to a QVM or QPU, find the :math:`\\mathbf{a}` and :math:`b` corresponding
    to the function represented by the oracle function that will be constructed from the
    bitstring map.

    :param qc: connection to the QPU or QVM
    :param bitstring_map: a truth table describing the boolean function, whose dot-product
     vector and bias is to be found
    """
    # initialize all attributes
    self.input_bitmap = bitstring_map
    self.n_qubits = len(list(bitstring_map.keys())[0])
    self.computational_qubits = list(range(self.n_qubits))
    self.ancilla = self.n_qubits  # is the highest index now.

    # construct BV circuit
    self.bv_circuit = self._create_bv_circuit(bitstring_map)

    # find vector by running the full bv circuit
    full_circuit = Program()
    # one readout bit per computational qubit plus one for the ancilla
    full_ro = full_circuit.declare('ro', 'BIT', len(self.computational_qubits) + 1)
    full_circuit += self.bv_circuit
    full_circuit += [MEASURE(qubit, ro) for qubit, ro in
                     zip(self.computational_qubits, full_ro)]
    full_executable = qc.compile(full_circuit)
    full_results = qc.run(full_executable)
    # reverse the measured bits so the string reads most-significant-first
    bv_vector = full_results[0][::-1]

    # To get the bias term we skip the Walsh-Hadamard transform
    ancilla_circuit = Program()
    ancilla_ro = ancilla_circuit.declare('ro', 'BIT', len(self.computational_qubits) + 1)
    ancilla_circuit += self.bv_circuit
    # only the ancilla is measured here, into its own readout slot
    ancilla_circuit += [MEASURE(self.ancilla, ancilla_ro[self.ancilla])]
    ancilla_executable = qc.compile(ancilla_circuit)
    ancilla_results = qc.run(ancilla_executable)
    bv_bias = ancilla_results[0][0]

    self.solution = ''.join([str(b) for b in bv_vector]), str(bv_bias)
    return self
def get_solution(self) -> Tuple[str, str]:
    """
    Returns the solution of the BV algorithm.

    :return: a tuple of string corresponding to the dot-product partner vector and the bias term
    :rtype: Tuple[String, String]
    """
    solution = self.solution
    if solution is None:
        raise AssertionError("You need to `run` this algorithm first")
    return solution
def check_solution(self) -> bool:
    """
    Checks if the found solution correctly reproduces the input.

    :return: True if solution correctly reproduces input bitstring map
    :rtype: Bool
    """
    if self.solution is None:
        raise AssertionError("You need to `run` this algorithm first")
    # rebuild the truth table from the recovered (vector, bias) and compare
    expected_map = create_bv_bitmap(*self.solution)
    return all(expected_map[key] == value
               for key, value in self.input_bitmap.items())
def diagonal_basis_commutes(pauli_a, pauli_b):
    """
    Test if `pauli_a` and `pauli_b` share a diagonal basis.

    Example:
        Check if [A, B] with the constraint that A & B must share a one-qubit
        diagonalizing basis. If the inputs were [sZ(0), sZ(0) * sZ(1)] then this
        function would return True. If the inputs were [sX(5), sZ(4)] this function
        would return True. If the inputs were [sX(0), sY(0) * sZ(2)] this function
        would return False.

    :param pauli_a: Pauli term to check commutation against `pauli_b`
    :param pauli_b: Pauli term to check commutation against `pauli_a`
    :return: Boolean of commutation result
    :rtype: Bool
    """
    shared_qubits = set(pauli_a.get_qubits()) & set(pauli_b.get_qubits())
    # compatible iff on every shared qubit at least one term is I or both agree
    return all(pauli_a[q] == 'I' or pauli_b[q] == 'I' or pauli_a[q] == pauli_b[q]
               for q in shared_qubits)
def get_diagonalizing_basis(list_of_pauli_terms):
    """
    Find the Pauli Term with the most non-identity terms.

    :param list_of_pauli_terms: List of Pauli terms to check
    :return: The highest weight Pauli Term
    :rtype: PauliTerm
    """
    # collect the union of (qubit, gate) pairs across all terms
    qubit_ops = set()
    for term in list_of_pauli_terms:
        qubit_ops.update(term._ops.items())
    ordered = sorted(qubit_ops, key=lambda pair: pair[0])
    # PauliTerm.from_list wants (gate, qubit) order, so swap each pair
    return PauliTerm.from_list([(gate, qubit) for qubit, gate in ordered])
def _max_key_overlap(pauli_term, diagonal_sets):
    """
    Calculate the max overlap of a pauli term ID with keys of diagonal_sets.

    Returns a different key if we find any collisions. If no collisions is found
    then the pauli term is added and the key is updated so it has the largest
    weight.

    :param pauli_term:
    :param diagonal_sets:
    :return: dictionary where key value pair is tuple indicating diagonal basis
     and list of PauliTerms that share that basis
    :rtype: dict
    """
    # a lot of the ugliness comes from the fact that
    # list(PauliTerm._ops.items()) is not the appropriate input for
    # Pauliterm.from_list()
    # iterate over a snapshot of the keys because the dict may be re-keyed below
    for key in list(diagonal_sets.keys()):
        pauli_from_key = PauliTerm.from_list(
            list(map(lambda x: tuple(reversed(x)), key)))
        if diagonal_basis_commutes(pauli_term, pauli_from_key):
            updated_pauli_set = diagonal_sets[key] + [pauli_term]
            diagonalizing_term = get_diagonalizing_basis(updated_pauli_set)
            if len(diagonalizing_term) > len(key):
                # the new term enlarges the diagonalizing basis: re-key the set
                del diagonal_sets[key]
                new_key = tuple(sorted(diagonalizing_term._ops.items(),
                                       key=lambda x: x[0]))
                diagonal_sets[new_key] = updated_pauli_set
            else:
                diagonal_sets[key] = updated_pauli_set
            # return on first commuting key found
            return diagonal_sets
    # made it through all keys and sets so need to make a new set
    else:
        # always need to sort because new pauli term functionality
        new_key = tuple(sorted(pauli_term._ops.items(), key=lambda x: x[0]))
        diagonal_sets[new_key] = [pauli_term]
        return diagonal_sets
def commuting_sets_by_zbasis(pauli_sums):
    """
    Computes commuting sets based on terms having the same diagonal basis.

    Following the technique outlined in the appendix of arXiv:1704.05018.

    :param pauli_sums: PauliSum object to group
    :return: dictionary where key value pair is a tuple corresponding to the
     basis and a list of PauliTerms associated with that basis.
    """
    # fold every term into the growing dictionary of diagonal sets
    grouped = {}
    for pauli_term in pauli_sums:
        grouped = _max_key_overlap(pauli_term, grouped)
    return grouped
def check_trivial_commutation(pauli_list, single_pauli_term):
    """
    Check if a PauliTerm trivially commutes with a list of other terms.

    :param list pauli_list: A list of PauliTerm objects
    :param PauliTerm single_pauli_term: A PauliTerm object
    :returns: True if pauli_two object commutes with pauli_list, False otherwise
    :rtype: bool
    """
    if not isinstance(pauli_list, list):
        raise TypeError("pauli_list should be a list")
    return all(_commutes(term, single_pauli_term) for term in pauli_list)
def commuting_sets_by_indices(pauli_sums, commutation_check):
    """
    For a list of pauli sums, find commuting sets and keep track of which pauli
    sum they came from.

    :param pauli_sums: A list of PauliSum
    :param commutation_check: a function that checks if all elements of a list
                              and a single pauli term commute.
    :return: A list of commuting sets. Each set is a list of tuples (i, j) to
             find the particular commuting term. i is the index of the pauli sum
             from whence the term came. j is the index within the set.
    """
    assert isinstance(pauli_sums, list)

    group_inds = []
    group_terms = []
    for sum_idx, pauli_sum in enumerate(pauli_sums):
        for term_idx, term in enumerate(pauli_sum):
            placed = False
            # greedily place the term into the first compatible group
            for group_idx, group in enumerate(group_terms):
                if commutation_check(group, term):
                    group_inds[group_idx].append((sum_idx, term_idx))
                    group_terms[group_idx].append(term)
                    placed = True
                    break
            if not placed:
                # no compatible group exists; start a new one
                group_inds.append([(sum_idx, term_idx)])
                group_terms.append([term])
    return group_inds
def commuting_sets_trivial(pauli_sum):
    """
    Group a pauli term into commuting sets using trivial check.

    :param pauli_sum: PauliSum term
    :return: list of lists containing individual Pauli Terms
    """
    if not isinstance(pauli_sum, (PauliTerm, PauliSum)):
        raise TypeError("This method can only group PauliTerm or PauliSum objects")

    # promote a lone term so iteration below is uniform
    if isinstance(pauli_sum, PauliTerm):
        pauli_sum = PauliSum([pauli_sum])

    groups = []
    for term in pauli_sum:
        # find the first group the term trivially commutes with
        for group in groups:
            if check_trivial_commutation(group, term):
                group.append(term)
                break
        else:
            groups.append([term])
    return groups
def ising(h: List[int], J: Dict[Tuple[int, int], int], num_steps: int = 0, verbose: bool = True,
          rand_seed: int = None, connection: QuantumComputer = None, samples: int = None,
          initial_beta: List[float] = None, initial_gamma: List[float] = None,
          minimizer_kwargs: Dict[str, Any] = None,
          vqe_option: Dict[str, Union[bool, int]] = None
          ) -> Tuple[List[int], Union[int, float], Program]:
    """
    Ising set up method.

    :param h: External magnetic term of the Ising problem.
    :param J: Interaction term of the Ising problem.
    :param num_steps: (Optional.Default=2 * len(h)) Trotterization order for the
                      QAOA algorithm.
    :param verbose: (Optional.Default=True) Verbosity of the code.
    :param rand_seed: (Optional. Default=None) random seed when beta and gamma angles
        are not provided.
    :param connection: (Optional) connection to the QVM. Default is None.
    :param samples: (Optional. Default=None) VQE option. Number of samples
        (circuit preparation and measurement) to use in operator averaging.
    :param initial_beta: (Optional. Default=None) Initial guess for beta parameters.
    :param initial_gamma: (Optional. Default=None) Initial guess for gamma parameters.
    :param minimizer_kwargs: (Optional. Default=None). Minimizer optional arguments.  If None set
        to ``{'method': 'Nelder-Mead', 'options': {'fatol': 1.0e-2, 'xatol': 1.0e-2,
        'disp': False}``
    :param vqe_option: (Optional. Default=None). VQE optional arguments.  If None set to
        ``vqe_option = {'disp': print_fun, 'return_all': True, 'samples': samples}``
    :return: Most frequent Ising string, Energy of the Ising string, Circuit used to obtain
        result.
    """
    if num_steps == 0:
        num_steps = 2 * len(h)

    n_nodes = len(h)

    cost_operators = []
    driver_operators = []
    # two-qubit ZZ terms from the interaction matrix J
    for i, j in J.keys():
        cost_operators.append(PauliSum([PauliTerm("Z", i, J[(i, j)]) * PauliTerm("Z", j)]))
    # single-qubit Z field terms and X driver terms
    for i in range(n_nodes):
        cost_operators.append(PauliSum([PauliTerm("Z", i, h[i])]))
        driver_operators.append(PauliSum([PauliTerm("X", i, -1.0)]))

    if connection is None:
        # NOTE(review): this counts duplicated qubit indices across J's key
        # tuples, so the requested QVM may be larger than n_nodes — verify intended
        qubits = list(sum(J.keys(), ()))
        connection = get_qc(f"{len(qubits)}q-qvm")

    if minimizer_kwargs is None:
        minimizer_kwargs = {'method': 'Nelder-Mead',
                            'options': {'fatol': 1.0e-2, 'xatol': 1.0e-2,
                                        'disp': False}}
    if vqe_option is None:
        vqe_option = {'disp': print, 'return_all': True,
                      'samples': samples}

    if not verbose:
        vqe_option['disp'] = None

    qaoa_inst = QAOA(connection, list(range(n_nodes)), steps=num_steps,
                     init_betas=initial_beta, init_gammas=initial_gamma,
                     cost_ham=cost_operators, ref_ham=driver_operators,
                     minimizer=minimize, minimizer_kwargs=minimizer_kwargs,
                     rand_seed=rand_seed, vqe_options=vqe_option, store_basis=True)

    betas, gammas = qaoa_inst.get_angles()
    most_freq_string, sampling_results = qaoa_inst.get_string(betas, gammas)
    # map {0, 1} measurement outcomes to Ising spins via ising_trans
    most_freq_string_ising = [ising_trans(it) for it in most_freq_string]
    energy_ising = energy_value(h, J, most_freq_string_ising)
    # rebuild the optimal circuit from the optimized angles
    param_prog = qaoa_inst.get_parameterized_program()
    circuit = param_prog(np.hstack((betas, gammas)))

    return most_freq_string_ising, energy_ising, circuit
def swap_circuit_generator(register_a: List[int], register_b: List[int],
                           ancilla: int) -> Program:
    """
    Generate the swap test circuit primitive.

    Registers A and B must be of equivalent size for swap to work.  This module
    uses the CSWAP gate in pyquil.

    :param register_a: qubit labels in the 'A' register
    :param register_b: qubit labels in the 'B' register
    :param ancilla: ancilla to measure and control the swap operation.
    """
    if len(register_a) != len(register_b):
        raise RegisterSizeMismatch("registers involve different numbers of qubits")
    if not isinstance(register_a, list):
        raise TypeError("Register A needs to be list")
    if not isinstance(register_b, list):
        raise TypeError("Register B needs to be a list")
    if ancilla is None:
        # pick the first unused qubit index as the ancilla
        ancilla = max(register_a + register_b) + 1

    circuit = Program()
    circuit += H(ancilla)
    for qubit_a, qubit_b in zip(register_a, register_b):
        circuit += CSWAP(ancilla, qubit_a, qubit_b)
    circuit += H(ancilla)
    return circuit
def get_parameterized_program(self):
    """
    Return a function that accepts parameters and returns a new Quil program.

    :returns: a function mapping a length-2*steps angle array to a pyquil Program
    """
    # Pre-build, per trotterization step, the parametric exponential maps for
    # every term of the cost and driver Hamiltonians.  Each list entry is a
    # function angle -> Program, so the closure below only binds angles.
    cost_para_programs = []
    driver_para_programs = []
    for idx in range(self.steps):
        cost_list = []
        driver_list = []
        for cost_pauli_sum in self.cost_ham:
            for term in cost_pauli_sum.terms:
                cost_list.append(exponential_map(term))
        for driver_pauli_sum in self.ref_ham:
            for term in driver_pauli_sum.terms:
                driver_list.append(exponential_map(term))
        cost_para_programs.append(cost_list)
        driver_para_programs.append(driver_list)

    def psi_ref(params):
        """
        Construct a Quil program for the vector (beta, gamma).

        :param params: array of 2 * p angles, betas first, then gammas
        :return: a pyquil program object
        """
        if len(params) != 2*self.steps:
            raise ValueError("params doesn't match the number of parameters set by `steps`")
        # First half of the vector are the betas, second half the gammas.
        betas = params[:self.steps]
        gammas = params[self.steps:]

        prog = Program()
        # Start from the reference-state preparation circuit.
        prog += self.ref_state_prep
        for idx in range(self.steps):
            # Per step: cost evolution (gamma), then driver evolution (beta).
            for fprog in cost_para_programs[idx]:
                prog += fprog(gammas[idx])
            for fprog in driver_para_programs[idx]:
                prog += fprog(betas[idx])
        return prog

    return psi_ref
def get_angles(self) -> Tuple[List[float], List[float]]:
    """
    Finds optimal angles with the quantum variational eigensolver method.

    Stores the VQE result on ``self.result``.

    :returns: A tuple of the beta angles and the gamma angles for the optimal
              solution.
    """
    # Optimize over the concatenated (betas, gammas) parameter vector.
    initial_params = np.hstack((self.betas, self.gammas))
    vqe = VQE(self.minimizer, minimizer_args=self.minimizer_args,
              minimizer_kwargs=self.minimizer_kwargs)
    # Combine all cost terms into one Hamiltonian -- maximizing the cost function!
    full_cost_ham = reduce(lambda left, right: left + right, self.cost_ham)
    result = vqe.vqe_run(self.get_parameterized_program(), full_cost_ham,
                         initial_params, qc=self.qc, **self.vqe_options)
    self.result = result
    # Split the optimal parameter vector back into betas and gammas.
    return result.x[:self.steps], result.x[self.steps:]
def probabilities(self, angles: List[float]) -> np.ndarray:
    """
    Computes the probability of each state given a particular set of angles.

    :param angles: A concatenated list of angles [betas]+[gammas]
    :return: The probabilities of each outcome given those angles, as a real
             column vector.
    """
    if isinstance(angles, list):
        angles = np.array(angles)
    assert angles.shape[0] == 2 * self.steps, "angles must be 2 * steps"
    param_prog = self.get_parameterized_program()
    prog = param_prog(angles)
    wf = WavefunctionSimulator().wavefunction(prog)
    wf = wf.amplitudes.reshape((-1, 1))
    # BUG FIX: np.zeros_like(wf) allocated a complex-dtype buffer, so the
    # returned "probabilities" carried a (zero) imaginary part.  Allocate a
    # real buffer and use |amplitude|^2 directly.
    probs = np.zeros((wf.shape[0], 1))
    for xx in range(2 ** len(self.qubits)):
        probs[xx] = np.abs(wf[xx]) ** 2
    return probs
def get_string(self, betas: List[float], gammas: List[float], samples: int = 100):
    """
    Compute the most probable string.

    The method assumes you have passed init_betas and init_gammas with your
    pre-computed angles or you have run the VQE loop to determine the angles.
    If you have not done this you will be returning the output for a random
    set of angles.

    :param betas: List of beta angles
    :param gammas: List of gamma angles
    :param samples: (Optional) number of samples to get back from the
                    QuantumComputer; must be a positive integer.
    :raises ValueError: if ``samples`` is not a positive integer.
    :returns: tuple of the most frequent bitstring and a collections.Counter
              holding all output bitstrings and their frequency.
    """
    # BUG FIX: the original guard used `and`, which only rejected values that
    # were BOTH non-positive AND non-int, so e.g. samples=-5 slipped through.
    if not isinstance(samples, int) or samples <= 0:
        raise ValueError("samples variable must be positive integer")
    param_prog = self.get_parameterized_program()
    stacked_params = np.hstack((betas, gammas))

    # Build the sampling circuit: state prep at the given angles followed by
    # measurement of every qubit into the readout register.
    sampling_prog = Program()
    ro = sampling_prog.declare('ro', 'BIT', len(self.qubits))
    sampling_prog += param_prog(stacked_params)
    sampling_prog += [MEASURE(qubit, r) for qubit, r in zip(self.qubits, ro)]
    sampling_prog.wrap_in_numshots_loop(samples)
    executable = self.qc.compile(sampling_prog)
    bitstring_samples = self.qc.run(executable)

    # Histogram the shots and pick the modal bitstring.
    bitstring_tuples = list(map(tuple, bitstring_samples))
    freq = Counter(bitstring_tuples)
    most_frequent_bit_string = max(freq, key=lambda x: freq[x])
    return most_frequent_bit_string, freq
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _compute_grover_oracle_matrix(bitstring_map: Dict[str, int]) -> np.ndarray: """ Computes the unitary matrix that encodes the oracle function for Grover's algorithm :param bitstring_map: dict with string keys corresponding to bitstrings, and integer values corresponding to the desired phase on the output state. :return: a numpy array corresponding to the unitary matrix for oracle for the given bitstring_map """
n_bits = len(list(bitstring_map.keys())[0]) oracle_matrix = np.zeros(shape=(2 ** n_bits, 2 ** n_bits)) for b in range(2 ** n_bits): pad_str = np.binary_repr(b, n_bits) phase_factor = bitstring_map[pad_str] oracle_matrix[b, b] = phase_factor return oracle_matrix
def _construct_grover_circuit(self) -> None:
    """
    Constructs an instance of Grover's Algorithm, using initialized values.

    :return: None
    """
    oracle_name = "GROVER_ORACLE"
    # Define the oracle as a custom gate and apply it across all qubits.
    oracle_program = Program()
    oracle_program.defgate(oracle_name, self.unitary_function_mapping)
    oracle_program.inst((oracle_name, *self.qubits))
    self.grover_circuit = self.oracle_grover(oracle_program, self.qubits)
def _init_attr(self, bitstring_map: Dict[str, int]) -> None:
    """
    Initializes an instance of Grover's Algorithm given a bitstring_map.

    :param bitstring_map: dict with string keys corresponding to bitstrings,
        and integer values corresponding to the desired phase on the output state.
    :type bitstring_map: Dict[String, Int]
    :return: None
    """
    self.bit_map = bitstring_map
    # Diagonal +/-1 unitary that encodes the oracle phases.
    self.unitary_function_mapping = self._compute_grover_oracle_matrix(bitstring_map)
    # NOTE(review): despite the name, this stores the Hilbert-space dimension
    # (2**n), not the qubit count -- confirm before relying on it elsewhere.
    self.n_qubits = self.unitary_function_mapping.shape[0]
    # Qubit labels 0..n-1 recovered from the matrix dimension.
    self.qubits = list(range(int(np.log2(self.n_qubits))))
    self._construct_grover_circuit()
def find_bitstring(self, qc: QuantumComputer, bitstring_map: Dict[str, int]) -> str:
    """
    Runs Grover's Algorithm to find the bitstring that is designated by
    ``bitstring_map``.

    In particular, this will prepare an initial state in the uniform
    superposition over all bitstrings, and then use Grover's Algorithm to pick
    out the desired bitstring.

    :param qc: the connection to the Rigetti cloud to run pyQuil programs.
    :param bitstring_map: a mapping from bitstrings to the phases that the
        oracle should impart on them. If the oracle should "look" for a
        bitstring, it should have a ``-1``, otherwise it should have a ``1``.
    :return: Returns the bitstring resulting from measurement after Grover's
        Algorithm.
    """
    # Build the oracle and Grover circuit for this bitstring map.
    self._init_attr(bitstring_map)

    # Append measurement of every search qubit into a fresh readout register.
    ro = self.grover_circuit.declare('ro', 'BIT', len(self.qubits))
    self.grover_circuit += [MEASURE(qubit, ro[idx]) for idx, qubit in enumerate(self.qubits)]
    executable = qc.compile(self.grover_circuit)
    sampled_bitstring = qc.run(executable)

    # Single shot: concatenate the measured bits into a string.
    return "".join([str(bit) for bit in sampled_bitstring[0]])
def oracle_grover(oracle: Program, qubits: List[int], num_iter: int = None) -> Program:
    """
    Implementation of Grover's Algorithm for a given oracle.

    :param oracle: An oracle defined as a Program. It should send
        :math:`\\ket{x}` to :math:`(-1)^{f(x)}\\ket{x}`, where the range of f
        is {0, 1}.
    :param qubits: List of qubits for Grover's Algorithm.
    :param num_iter: The number of iterations to repeat the algorithm for.
        The default is the integer closest to :math:`\\frac{\\pi}{4}\\sqrt{N}`,
        where :math:`N` is the size of the domain.
    :return: A program corresponding to the desired instance of Grover's
        Algorithm.
    """
    if num_iter is None:
        # (pi/4) * sqrt(2**n) rewritten as pi * 2**(n/2 - 2).
        num_iter = int(round(np.pi * 2 ** (len(qubits) / 2.0 - 2.0)))
    # Prepare the uniform superposition, then amplify the marked state.
    many_hadamards = Program().inst([H(qubit) for qubit in qubits])
    return amplification_circuit(many_hadamards, oracle, qubits, num_iter)
def get_rotation_parameters(phases, magnitudes):
    """
    Simulates one step of rotations.

    Given lists of phases and magnitudes of the same length :math:`N`, such
    that :math:`N=2^n` for some positive integer :math:`n`, finds the rotation
    angles required for one step of phase and magnitude unification.

    :param list phases: real valued phases from :math:`-\\pi` to :math:`\\pi`.
    :param list magnitudes: positive, real value magnitudes such that the sum
        of the square of each magnitude is :math:`2^{-m}` for some nonnegative
        integer :math:`m`.
    :return: A tuple t of four lists such that

        - t[0] are the z-rotations needed to unify adjacent pairs of phases
        - t[1] are the y-rotations needed to unify adjacent pairs of magnitudes
        - t[2] are the updated phases after these rotations are applied
        - t[3] are the updated magnitudes after these rotations are applied

    :rtype: tuple
    """
    z_thetas = []
    y_thetas = []
    new_phases = []
    new_magnitudes = []

    # Process the inputs in adjacent (even, odd) index pairs.
    pairs = zip(phases[::2], phases[1::2], magnitudes[::2], magnitudes[1::2])
    for phi, psi, a, b in pairs:
        # z-rotation by the phase difference unifies the pair; the pair is
        # then represented by its average phase.
        z_thetas.append(phi - psi)
        new_phases.append((phi + psi) / 2.)

        # y-rotation unifies the pair of magnitudes; a zero pair needs no
        # rotation (and would divide by zero).
        if a == 0 and b == 0:
            y_thetas.append(0)
        else:
            y_thetas.append(
                2 * np.arcsin((a - b) / (np.sqrt(2 * (a ** 2 + b ** 2)))))
        # Combined magnitude after the rotation.
        new_magnitudes.append(np.sqrt((a ** 2 + b ** 2) / 2.))

    return z_thetas, y_thetas, new_phases, new_magnitudes
def get_reversed_unification_program(angles, control_indices, target, controls, mode):
    """
    Gets the Program representing the reversed circuit for the decomposition
    of the uniformly controlled rotations in a unification step.

    If :math:`n` is the number of controls, the indices within control_indices
    must range from 1 to :math:`n`, inclusive. The length of control_indices
    and the length of angles must both be :math:`2^n`.

    :param list angles: The angles of rotation in the decomposition, in order
        from left to right
    :param list control_indices: a list of positions for the controls of the
        CNOTs used when decomposing uniformly controlled rotations; see
        get_cnot_control_positions for labelling conventions.
    :param int target: Index of the target of all rotations
    :param list controls: Index of the controls, in order from bottom to top.
    :param str mode: The unification mode. Is either 'phase', corresponding to
        controlled RZ rotations, or 'magnitude', corresponding to controlled
        RY rotations.
    :return: The reversed circuit of this unification step.
    :rtype: Program
    """
    # Select the rotation axis from the unification mode.
    if mode == 'phase':
        gate = RZ
    elif mode == 'magnitude':
        gate = RY
    else:
        raise ValueError("mode must be \'phase\' or \'magnitude\'")

    reversed_gates = []

    # Build the forward gate sequence (skipping zero rotations), then reverse
    # it below to obtain the conjugate circuit.
    for j in range(len(angles)):
        if angles[j] != 0:
            # angle is negated in conjugated/reversed circuit
            reversed_gates.append(gate(-angles[j], target))
        if len(controls) > 0:
            # CNOT from the control designated for slot j onto the target.
            reversed_gates.append(CNOT(controls[control_indices[j] - 1],
                                       target))

    return Program().inst(reversed_gates[::-1])
def get_ancestors(self):
    """Returns a list of ancestors of the node. Ordered from the earliest.

    :return: node's ancestors, ordered from most recent
    :rtype: list(FenwickNode)
    """
    ancestors = []
    current = self.parent
    # Walk up the parent chain until the root (whose parent is None).
    while current is not None:
        ancestors.append(current)
        current = current.parent
    return ancestors
def bitwise_dot_product(bs0: str, bs1: str) -> str:
    """
    A helper to calculate the bitwise dot-product between two strings
    representing bit-vectors.

    :param bs0: String of 0's and 1's representing a number in binary
        representation
    :param bs1: String of 0's and 1's representing a number in binary
        representation
    :return: 0 or 1 as a string corresponding to the dot-product value
    """
    if len(bs0) != len(bs1):
        raise ValueError("Bit strings are not of equal length")
    # Sum of pairwise bit products, reduced mod 2.
    dot = sum(int(a) * int(b) for a, b in zip(bs0, bs1))
    return str(dot % 2)
def notebook_mode(m):
    """
    Configure whether this module should assume that it is being run from a
    jupyter notebook.

    This sets some global variables related to how progress for long
    measurement sequences is indicated.

    :param bool m: If True, assume to be in notebook.
    :return: None
    :rtype: NoneType
    """
    global NOTEBOOK_MODE
    global TRANGE
    NOTEBOOK_MODE = m
    # Select the progress-bar flavor: tqdm's notebook widget vs the plain
    # terminal range iterator.
    if NOTEBOOK_MODE:
        TRANGE = tqdm.tnrange
    else:
        TRANGE = tqdm.trange
def sample_outcomes(probs, n):
    """
    random samples.

    :param list probs: A list of probabilities.
    :param Number n: The number of random samples to draw.
    :rtype: numpy.ndarray
    """
    # Inverse-CDF sampling: draw uniform variates and locate each in the
    # cumulative distribution.
    cdf = np.cumsum(probs)
    draws = np.random.rand(n)
    # First index where draw < cdf, vectorized via searchsorted (side='right'
    # counts entries <= draw, i.e. the first strictly-greater index).
    return np.array(np.searchsorted(cdf, draws, side='right'))
def sample_bad_readout(program, num_samples, assignment_probs, cxn):
    """
    Generate `n` samples of measuring all outcomes of a Quil `program`
    assuming the assignment probabilities `assignment_probs` by simulating the
    wave function on a qvm QVMConnection `cxn`.

    :param pyquil.quil.Program program: The program.
    :param int num_samples: The number of samples
    :param numpy.ndarray assignment_probs: A matrix of assignment probabilities
    :param QVMConnection cxn: the QVM connection.
    :return: The resulting sampled outcomes from assignment_probs applied to
        cxn, one dimensional.
    :rtype: numpy.ndarray
    """
    # Simulate the ideal outcome distribution |psi|^2, mix it through the
    # readout-error assignment matrix, then sample from the noisy distribution.
    wf = cxn.wavefunction(program)
    return sample_outcomes(assignment_probs.dot(abs(wf.amplitudes.ravel())**2), num_samples)
def plot_pauli_transfer_matrix(ptransfermatrix, ax, labels, title):
    """
    Visualize the Pauli Transfer Matrix of a process.

    :param numpy.ndarray ptransfermatrix: The Pauli Transfer Matrix
    :param ax: The matplotlib axes.
    :param labels: The labels for the operator basis states.
    :param title: The title for the plot
    :return: The modified axis object.
    :rtype: AxesSubplot
    """
    # PTM entries of a trace-preserving map lie in [-1, 1].
    heatmap = ax.imshow(ptransfermatrix, interpolation="nearest",
                        cmap=rigetti_3_color_cm, vmin=-1, vmax=1)
    plt.colorbar(heatmap, ax=ax)
    dim = len(labels)
    ticks = range(dim)
    ax.set_xticks(ticks)
    ax.set_yticks(ticks)
    ax.set_xticklabels(labels, rotation=45)
    ax.set_yticklabels(labels)
    ax.set_xlabel("Input Pauli Operator", fontsize=20)
    ax.set_ylabel("Output Pauli Operator", fontsize=20)
    ax.set_title(title, fontsize=25)
    ax.grid(False)
    return ax
def state_histogram(rho, ax=None, title="", threshold=0.001):
    """
    Visualize a density matrix as a 3d bar plot with complex phase encoded
    as the bar color.

    This code is a modified version of
    `an equivalent function in qutip
    <http://qutip.org/docs/3.1.0/apidoc/functions.html#qutip.visualization.matrix_histogram_complex>`_
    which is released under the (New) BSD license.

    :param qutip.Qobj rho: The density matrix.
    :param Axes3D ax: The axes object.
    :param str title: The axes title.
    :param float threshold: (Optional) minimum magnitude of matrix elements.
        Values below this are hidden.
    :return: The axis
    :rtype: mpl_toolkits.mplot3d.Axes3D
    """
    # Flatten the matrix; each entry becomes one bar.
    rho_amps = rho.data.toarray().ravel()
    # Number of qubits recovered from the matrix dimension (2**n).
    nqc = int(round(np.log2(rho.shape[0])))
    if ax is None:
        fig = plt.figure(figsize=(10, 6))
        ax = Axes3D(fig, azim=-35, elev=35)

    cmap = rigetti_4_color_cm
    norm = mpl.colors.Normalize(-np.pi, np.pi)
    # Bar color encodes the complex phase of each matrix element.
    colors = cmap(norm(np.angle(rho_amps)))
    # Bar height encodes the magnitude; bars below threshold are made
    # fully transparent via the alpha channel.
    dzs = abs(rho_amps)
    colors[:, 3] = 1.0 * (dzs > threshold)
    xs, ys = np.meshgrid(range(2 ** nqc), range(2 ** nqc))
    xs = xs.ravel()
    ys = ys.ravel()
    zs = np.zeros_like(xs)
    dxs = dys = np.ones_like(xs) * 0.8

    _ = ax.bar3d(xs, ys, zs, dxs, dys, dzs, color=colors)
    # Label both axes with the computational basis states; the +.4 centers
    # the labels under the 0.8-wide bars.
    ax.set_xticks(np.arange(2 ** nqc) + .4)
    ax.set_xticklabels(basis_labels(nqc))
    ax.set_yticks(np.arange(2 ** nqc) + .4)
    ax.set_yticklabels(basis_labels(nqc))
    ax.set_zlim3d([0, 1])

    # Phase colorbar from -pi to pi.
    cax, kw = mpl.colorbar.make_axes(ax, shrink=.75, pad=.1)
    cb = mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm)
    cb.set_ticks([-np.pi, -np.pi / 2, 0, np.pi / 2, np.pi])
    cb.set_ticklabels((r'$-\pi$', r'$-\pi/2$', r'$0$', r'$\pi/2$', r'$\pi$'))
    cb.set_label('arg')
    ax.view_init(azim=-55, elev=45)
    ax.set_title(title)
    return ax
def bitlist_to_int(bitlist):
    """Convert a binary bitstring into the corresponding unsigned integer.

    :param list bitlist: A list of ones or zeros.
    :return: The corresponding integer.
    :rtype: int
    """
    value = 0
    # Accumulate bits most-significant first.
    for bit in bitlist:
        value = value * 2 + (int(bit) & 1)
    return value
def sample_assignment_probs(qubits, nsamples, cxn):
    """
    Sample the assignment probabilities of qubits using nsamples per
    measurement, and then compute the estimated assignment probability matrix.
    See the docstring for estimate_assignment_probs for more information.

    :param list qubits: Qubits to sample the assignment probabilities for.
    :param int nsamples: The number of samples to use in each measurement.
    :param QPUConnection|QVMConnection cxn: The Connection object to connect
        to Forest.
    :return: The assignment probability matrix.
    :rtype: numpy.ndarray
    """
    num_qubits = len(qubits)
    dimension = 2 ** num_qubits
    hists = []
    # One preparation program per computational basis state.
    preps = basis_state_preps(*qubits)
    jobs = []
    _log.info('Submitting jobs...')
    # Submit all measurement jobs asynchronously; TRANGE shows progress.
    for jj, p in izip(TRANGE(dimension), preps):
        jobs.append(cxn.run_and_measure_async(p, qubits, nsamples))

    _log.info('Waiting for results...')
    # Collect results in submission order and histogram the measured outcomes
    # (bit lists converted to basis-state indices).
    for jj, job_id in izip(TRANGE(dimension), jobs):
        job = cxn.wait_for_job(job_id)
        results = job.result()
        idxs = list(map(bitlist_to_int, results))
        hists.append(make_histogram(idxs, dimension))

    return estimate_assignment_probs(hists)
def run_in_parallel(programs, nsamples, cxn, shuffle=True):
    """
    Take sequences of Protoquil programs on disjoint qubits and execute a
    single sequence of programs that executes the input programs in parallel.
    Optionally randomize within each qubit-specific sequence.

    The programs are passed as a 2d array of Quil programs, where the (first)
    outer axis iterates over disjoint sets of qubits that the programs involve
    and the inner axis iterates over a sequence of related programs, e.g.,
    tomography sequences, on the same set of qubits.

    :param Union[np.ndarray,List[List[Program]]] programs: A rectangular list
        of lists, or a 2d array of Quil Programs. The outer list iterates over
        disjoint qubit groups as targets, the inner list over programs to run
        on those qubits, e.g., tomographic sequences.
    :param int nsamples: Number of repetitions for executing each Program.
    :param QPUConnection|QVMConnection cxn: The quantum machine connection.
    :param bool shuffle: If True, the order of each qubit specific sequence
        (2nd axis) is randomized. Default is True.
    :return: An array of 2d arrays that provide bitstring histograms for each
        input program. The axis of the outer array iterates over the disjoint
        qubit groups, the outer axis of the inner 2d array iterates over the
        programs for that group and the inner most axis iterates over all
        possible bitstrings for the qubit group under consideration.
    :rtype np.array
    """
    if shuffle:
        n_groups = len(programs)
        n_progs_per_group = len(programs[0])
        # Start from the identity permutation for every group, then shuffle
        # each group independently.
        permutations = np.outer(np.ones(n_groups, dtype=int),
                                np.arange(n_progs_per_group, dtype=int))
        inverse_permutations = np.zeros_like(permutations)
        for jj in range(n_groups):
            # in-place operation
            np.random.shuffle(permutations[jj])
            # store inverse permutation
            inverse_permutations[jj] = np.argsort(permutations[jj])

        # apply to programs
        shuffled_programs = np.empty((n_groups, n_progs_per_group), dtype=object)
        for jdx, (progsj, pj) in enumerate(zip(programs, permutations)):
            shuffled_programs[jdx] = [progsj[pjk] for pjk in pj]
        shuffled_results = _run_in_parallel(shuffled_programs, nsamples, cxn)

        # reverse shuffling of results so histograms line up with the
        # caller's original program order
        results = np.array([resultsj[pj]
                            for resultsj, pj in zip(shuffled_results, inverse_permutations)])
        return results
    else:
        return _run_in_parallel(programs, nsamples, cxn)
def remove_imaginary_terms(pauli_sums: PauliSum) -> PauliSum:
    """
    Remove the imaginary component of each term in a Pauli sum.

    :param pauli_sums: The Pauli sum to process.
    :return: a purely Hermitian Pauli sum.
    """
    if not isinstance(pauli_sums, PauliSum):
        raise TypeError("not a pauli sum. please give me one")
    # Accumulate each term with only the real part of its coefficient,
    # starting from the zero operator.
    hermitian_sum = sI(0) * 0.0
    for pauli_term in pauli_sums:
        hermitian_sum += term_with_coeff(pauli_term, pauli_term.coefficient.real)
    return hermitian_sum
def get_rotation_program(pauli_term: PauliTerm) -> Program:
    """
    Generate a rotation program so that the pauli term is diagonal.

    :param pauli_term: The Pauli term used to generate diagonalizing one-qubit
        rotations.
    :raises ValueError: if the term contains an operator other than X, Y or Z.
    :return: The rotation program.
    """
    meas_basis_change = Program()
    for index, gate in pauli_term:
        if gate == 'X':
            # Rotate the X eigenbasis onto Z.
            meas_basis_change.inst(RY(-np.pi / 2, index))
        elif gate == 'Y':
            # Rotate the Y eigenbasis onto Z.
            meas_basis_change.inst(RX(np.pi / 2, index))
        elif gate == 'Z':
            # Already diagonal; no rotation needed.
            pass
        else:
            # BUG FIX: the original raised a bare ValueError() with no message,
            # hiding which operator/qubit was at fault.
            raise ValueError(f"Unsupported Pauli operator '{gate}' on qubit {index}")

    return meas_basis_change
def controlled(m: np.ndarray) -> np.ndarray:
    """
    Make a one-qubit-controlled version of a matrix.

    :param m: A square (n x n) matrix.
    :return: A (2n x 2n) controlled version of that matrix, as a plain ndarray.
    """
    rows, cols = m.shape
    assert rows == cols
    n = rows
    I = np.eye(n)
    Z = np.zeros((n, n))
    # BUG FIX: np.bmat returns the deprecated np.matrix type (for which `*`
    # means matrix multiplication), contradicting the declared np.ndarray
    # return type; np.block produces a true ndarray with the same layout.
    controlled_m = np.block([[I, Z], [Z, m]])
    return controlled_m
def phase_estimation(U: np.ndarray, accuracy: int, reg_offset: int = 0) -> Program:
    """
    Generate a circuit for quantum phase estimation.

    :param U: A unitary matrix.
    :param accuracy: Number of bits of accuracy desired.
    :param reg_offset: Where to start writing measurements (default 0).
    :return: A Quil program to perform phase estimation.
    """
    assert isinstance(accuracy, int)
    rows, cols = U.shape
    # Number of qubits needed to hold the eigenstate register.
    m = int(log2(rows))
    output_qubits = range(0, accuracy)
    U_qubits = range(accuracy, accuracy + m)

    p = Program()
    ro = p.declare('ro', 'BIT', len(output_qubits))
    # Hadamard initialization
    for i in output_qubits:
        p.inst(H(i))

    # Controlled unitaries: U is squared in place each iteration, so when
    # qubit i is processed it holds U^(2**i).
    for i in output_qubits:
        if i > 0:
            U = np.dot(U, U)
        cU = controlled(U)
        name = "CONTROLLED-U{0}".format(2 ** i)
        # define the gate
        p.defgate(name, cU)
        # apply it
        p.inst((name, i) + tuple(U_qubits))

    # Compute the QFT
    p = p + inverse_qft(output_qubits)

    # Perform the measurements
    # NOTE(review): ro is declared with only len(output_qubits) bits, so a
    # nonzero reg_offset indexes past the end of ro -- confirm intended usage.
    for i in output_qubits:
        p.measure(i, ro[reg_offset + i])

    return p
def binary_float_to_decimal_float(number: Union[float, str]) -> float:
    """
    Convert binary floating point to decimal floating point.

    :param number: Binary floating point, e.g. '0.101' or -0.11; only the
        fractional part (after the '.') is interpreted.
    :return: Decimal floating point representation of binary floating point.
    """
    # Determine the sign, normalizing the input to a string.
    if isinstance(number, str):
        sign = -1 if number[0] == '-' else 1
    elif isinstance(number, float):
        sign = np.sign(number)
        number = str(number)
    # Accumulate the fractional binary digits as negative powers of two.
    result = 0
    for position, digit in enumerate(number.split('.')[-1], start=1):
        result += float(digit) / 2 ** position
    return result * sign
def measurements_to_bf(measurements: np.ndarray) -> float:
    """
    Convert measurements into gradient binary fraction.

    :param measurements: Output measurements of gradient program.
    :return: Binary fraction representation of gradient estimate.
    """
    # Accept plain sequences as well as ndarrays.
    measurements = np.asarray(measurements)
    # Fraction of shots in which each qubit read out 1.
    averages = measurements.sum(axis=0) / len(measurements)
    # Reverse the qubit order, drop the first bit, and round each remaining
    # average to the nearest bit value.
    bits = [str(int(b)) for b in np.round(averages[::-1][1:])]
    return float('0.' + ''.join(bits))
def amplification_circuit(algorithm: Program, oracle: Program, qubits: List[int],
                          num_iter: int, decompose_diffusion: bool = False) -> Program:
    """
    Returns a program that does ``num_iter`` rounds of amplification, given a
    measurement-less algorithm, an oracle, and a list of qubits to operate on.

    :param algorithm: A program representing a measurement-less algorithm run
        on qubits.
    :param oracle: An oracle maps any basis vector ``|psi>`` to either
        ``+|psi>`` or ``-|psi>`` depending on whether ``|psi>`` is in the
        desirable subspace or the undesirable subspace.
    :param qubits: the qubits to operate on
    :param num_iter: number of iterations of amplifications to run
    :param decompose_diffusion: If True, decompose the Grover diffusion gate
        into two qubit gates. If False, use a defgate to define the gate.
    :return: The amplified algorithm.
    """
    program = Program()

    # Start in the uniform superposition over all qubits.
    uniform_superimposer = Program().inst([H(qubit) for qubit in qubits])
    program += uniform_superimposer

    if decompose_diffusion:
        diffusion = decomposed_diffusion_program(qubits)
    else:
        diffusion = diffusion_program(qubits)

    # To avoid redefining gates, we collect them before building our program.
    defined_gates = oracle.defined_gates + algorithm.defined_gates + diffusion.defined_gates

    # Each Grover iteration: oracle, undo the algorithm, diffuse, redo the
    # algorithm (instructions only; gate definitions are re-attached below).
    for _ in range(num_iter):
        program += (oracle.instructions
                    + algorithm.dagger().instructions
                    + diffusion.instructions
                    + algorithm.instructions)
    # We redefine the gates in the new program.
    for gate in defined_gates:
        program.defgate(gate.name, gate.matrix)
    return program
def _operator_generator(index, conj):
    """
    Internal method to generate the appropriate (creation/annihilation)
    operator under the Jordan-Wigner transform; ``conj`` selects between the
    two via the sign of the Y-term coefficient.
    """
    # Jordan-Wigner Z-string on all qubits below `index`.
    z_string = PauliTerm('I', 0, 1.0)
    for qubit in range(index):
        z_string = z_string * PauliTerm('Z', qubit, 1.0)

    # (X +/- iY)/2 on the target site, dressed with the Z-string.
    x_part = z_string * PauliTerm('X', index, 0.5)
    y_part = z_string * PauliTerm('Y', index, 0.5 * conj * 1.0j)
    operator = PauliTerm('I', 0, 1.0) * (x_part + y_part)
    return operator.simplify()
def maxcut_qaoa(graph, steps=1, rand_seed=None, connection=None, samples=None,
                initial_beta=None, initial_gamma=None, minimizer_kwargs=None,
                vqe_option=None):
    """
    Max cut set up method

    :param graph: Graph definition. Either networkx or list of tuples
    :param steps: (Optional. Default=1) Trotterization order for the QAOA
        algorithm.
    :param rand_seed: (Optional. Default=None) random seed when beta and gamma
        angles are not provided.
    :param connection: (Optional) connection to the QVM. Default is None.
    :param samples: (Optional. Default=None) VQE option. Number of samples
        (circuit preparation and measurement) to use in operator averaging.
    :param initial_beta: (Optional. Default=None) Initial guess for beta
        parameters.
    :param initial_gamma: (Optional. Default=None) Initial guess for gamma
        parameters.
    :param minimizer_kwargs: (Optional. Default=None). Minimizer optional
        arguments. If None set to
        ``{'method': 'Nelder-Mead', 'options': {'ftol': 1.0e-2,
        'xtol': 1.0e-2, 'disp': False}``
    :param vqe_option: (Optional. Default=None). VQE optional arguments. If
        None set to
        ``vqe_option = {'disp': print_fun, 'return_all': True,
        'samples': samples}``
    """
    # Accept an edge list by converting it into a networkx Graph.
    if not isinstance(graph, nx.Graph) and isinstance(graph, list):
        maxcut_graph = nx.Graph()
        for edge in graph:
            maxcut_graph.add_edge(*edge)
        graph = maxcut_graph.copy()

    cost_operators = []
    driver_operators = []
    # Cost Hamiltonian: 0.5*(Z_i Z_j - I) per edge, so each cut edge lowers
    # the energy.
    for i, j in graph.edges():
        cost_operators.append(PauliTerm("Z", i, 0.5)*PauliTerm("Z", j) + PauliTerm("I", 0, -0.5))
    # Driver Hamiltonian: transverse-field X on every node.
    for i in graph.nodes():
        driver_operators.append(PauliSum([PauliTerm("X", i, -1.0)]))

    if connection is None:
        connection = get_qc(f"{len(graph.nodes)}q-qvm")

    if minimizer_kwargs is None:
        minimizer_kwargs = {'method': 'Nelder-Mead',
                            'options': {'ftol': 1.0e-2, 'xtol': 1.0e-2,
                                        'disp': False}}
    if vqe_option is None:
        vqe_option = {'disp': print, 'return_all': True,
                      'samples': samples}

    qaoa_inst = QAOA(connection, list(graph.nodes()), steps=steps, cost_ham=cost_operators,
                     ref_ham=driver_operators, store_basis=True,
                     rand_seed=rand_seed,
                     init_betas=initial_beta,
                     init_gammas=initial_gamma,
                     minimizer=minimize,
                     minimizer_kwargs=minimizer_kwargs,
                     vqe_options=vqe_option)

    return qaoa_inst
def default_rotations(*qubits):
    """
    Generates the Quil programs for the tomographic pre- and post-rotations
    of any number of qubits.

    :param list qubits: A list of qubits to perform tomography on.
    """
    # One program per element of the cartesian product of the single-qubit
    # tomography gate set over all qubits.
    for gates in cartesian_product(TOMOGRAPHY_GATES.keys(), repeat=len(qubits)):
        tomography_program = Program()
        for qubit, gate in izip(qubits, gates):
            tomography_program.inst(gate(qubit))
        yield tomography_program
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def default_channel_ops(nqubits): """ Generate the tomographic pre- and post-rotations of any number of qubits as qutip operators. :param int nqubits: The number of qubits to perform tomography on. :return: Qutip object corresponding to the tomographic rotation. :rtype: Qobj """
# Tensor together each nqubits-fold combination of single-qubit tomography
# rotations, yielding one qutip operator per combination.
for gates in cartesian_product(TOMOGRAPHY_GATES.values(), repeat=nqubits):
    yield qt.tensor(*gates)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_functional(cls): """ Checks lazily whether a convex solver is installed that handles positivity constraints. :return: True if a solver supporting positivity constraints is installed. :rtype: bool """
# Run the solver probe at most once per class; cache the outcome in
# cls._functional for subsequent calls.
if not cls._tested:
    cls._tested = True
    np.random.seed(SEED)
    test_problem_dimension = 10
    # Build a random positive-semidefinite matrix for a small test SDP.
    mat = np.random.randn(test_problem_dimension, test_problem_dimension)
    posmat = mat.dot(mat.T)
    posvar = cvxpy.Variable(test_problem_dimension, test_problem_dimension)
    # Minimize a trace + norm objective subject to posvar >> 0 (positivity)
    # — exercises exactly the constraint type tomography needs.
    prob = cvxpy.Problem(cvxpy.Minimize((cvxpy.trace(posmat * posvar)
                                         + cvxpy.norm(posvar))),
                         [posvar >> 0, cvxpy.trace(posvar) >= 1.])
    try:
        prob.solve(SOLVER)
        cls._functional = True
    except cvxpy.SolverError:  # pragma no coverage
        _log.warning("No convex SDP solver found. You will not be able to solve"
                     " tomography problems with matrix positivity constraints.")
return cls._functional
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def measure_wf_coefficients(prep_program, coeff_list, reference_state, quantum_resource, variance_bound=1.0E-6): """ Measure a set of coefficients with a phase relative to the reference_state :param prep_program: pyQuil program to prepare the state :param coeff_list: list of integers labeling amplitudes to measure :param reference_state: Integer of the computational basis state to use as a reference :param quantum_resource: An instance of a quantum abstract machine :param variance_bound: Default 1.0E-6. variance of the monte carlo estimator for the non-hermitian operator :return: returns a list of reference_state amplitude + coeff_list amplitudes """
num_qubits = len(prep_program.get_qubits())
# |ref><ref| projector: its expectation value is |c0|^2, which fixes the
# reference amplitude (up to global phase).
normalizer_ops = projector_generator(reference_state, reference_state)
c0_coeff, _, _ = estimate_locally_commuting_operator(
    prep_program, normalizer_ops, variance_bound=variance_bound,
    quantum_resource=quantum_resource)
c0_coeff = np.sqrt(c0_coeff)

amplitudes = []
for ii in coeff_list:
    if ii == reference_state:
        amplitudes.append(c0_coeff)
    else:
        # Transition operator |ref><ii|; dividing its expectation by
        # c0_coeff yields c_ii relative to the reference phase.
        bra = list(map(int, np.binary_repr(ii, width=num_qubits)))
        c_ii_op = projector_generator(reference_state, bra)
        result = estimate_locally_commuting_operator(
            prep_program, c_ii_op, variance_bound=variance_bound,
            quantum_resource=quantum_resource)
        amplitudes.append(result[0] / c0_coeff)
return amplitudes
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def measure_pure_state(prep_program, reference_state, quantum_resource, variance_bound=1.0E-6): """ Measure the coefficients of the pure state :param prep_program: pyQuil program to prepare the state :param reference_state: Integer of the computational basis state to use as a reference :param quantum_resource: An instance of a quantum abstract machine :param variance_bound: Default 1.0E-6. variance of the monte carlo estimator for the non-hermitian operator :return: an estimate of the wavefunction as a numpy.ndarray """
num_qubits = len(prep_program.get_qubits())
# Measure every computational-basis amplitude (2^n of them) relative to
# reference_state, then return them as a column vector.
amplitudes_to_measure = list(range(2 ** num_qubits))
amplitudes = measure_wf_coefficients(prep_program, amplitudes_to_measure,
                                     reference_state,
                                     quantum_resource,
                                     variance_bound=variance_bound)
wavefunction = np.asarray(amplitudes)
return wavefunction.reshape((-1, 1))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build(self): """Builds this controlled gate. :return: The controlled gate, defined by this object. :rtype: Program """
# Seed the defined-gate set with the standard gates so _defgate only emits
# DEFGATE for genuinely new gates.
self.defined_gates = set(STANDARD_GATE_NAMES)
prog = self._recursive_builder(self.operation,
                               self.gate_name,
                               self.control_qubits,
                               self.target_qubit)
return prog
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _defgate(self, program, gate_name, gate_matrix): """Defines a gate named gate_name with matrix gate_matrix in program. In addition, updates self.defined_gates to track what has been defined. :param Program program: Pyquil Program to add the defgate and gate to. :param str gate_name: The name of the gate to add to program. :param numpy.ndarray gate_matrix: The array corresponding to the gate to define. :return: the modified Program. :retype: Program """
# Work on a copy so the caller's Program is never mutated.
new_program = pq.Program()
new_program += program
if gate_name not in self.defined_gates:
    new_program.defgate(gate_name, gate_matrix)
    self.defined_gates.add(gate_name)
return new_program
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parity_even_p(state, marked_qubits): """ Calculates the parity of elements at indexes in marked_qubits Parity is relative to the binary representation of the integer state. :param state: The wavefunction index that corresponds to this state. :param marked_qubits: The indexes to be considered in the parity sum. :returns: A boolean corresponding to the parity. """
assert isinstance(state, int), \
    f"{state} is not an integer. Must call parity_even_p with an integer state."
# Build a bitmask of the marked qubit positions, then count the set bits
# of the masked state; an even popcount means even parity.
mask = 0
for q in marked_qubits:
    mask |= 1 << q
return bin(mask & state).count("1") % 2 == 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def vqe_run(self, variational_state_evolve, hamiltonian, initial_params, gate_noise=None, measurement_noise=None, jacobian=None, qc=None, disp=None, samples=None, return_all=False): """ functional minimization loop. :param variational_state_evolve: function that takes a set of parameters and returns a pyQuil program. :param hamiltonian: (PauliSum) object representing the hamiltonian of which to take the expectation value. :param initial_params: (ndarray) vector of initial parameters for the optimization :param gate_noise: list of Px, Py, Pz probabilities of gate being applied to every gate after each get application :param measurement_noise: list of Px', Py', Pz' probabilities of a X, Y or Z being applied before a measurement. :param jacobian: (optional) method of generating jacobian for parameters (Default=None). :param qc: (optional) QuantumComputer object. :param disp: (optional, bool) display level. If True then each iteration expectation and parameters are printed at each optimization iteration. :param samples: (int) Number of samples for calculating the expectation value of the operators. If `None` then faster method ,dotting the wave function with the operator, is used. Default=None. :param return_all: (optional, bool) request to return all intermediate parameters determined during the optimization. :return: (vqe.OptResult()) object :func:`OptResult <vqe.OptResult>`. The following fields are initialized in OptResult: -x: set of w.f. ansatz parameters -fun: scalar value of the objective function -iteration_params: a list of all intermediate parameter vectors. Only returned if 'return_all=True' is set as a vqe_run() option. -expectation_vals: a list of all intermediate expectation values. Only returned if 'return_all=True' is set as a vqe_run() option. """
# NOTE(review): `disp` is documented as a bool but used as a callable
# below (self._disp_fun("...")); passing True would raise — confirm
# callers pass a print-like function.
self._disp_fun = disp if disp is not None else lambda x: None
iteration_params = []
expectation_vals = []
self._current_expectation = None
if samples is None:
    print("""WARNING: Fast method for expectation will be used. Noise models will be ineffective""")

if qc is None:
    # Construct a noisy QVM sized to the Hamiltonian's qubits.
    qubits = hamiltonian.get_qubits()
    qc = QuantumComputer(name=f"{len(qubits)}q-noisy-qvm",
                         qam=QVM(gate_noise=gate_noise,
                                 measurement_noise=measurement_noise))
else:
    # NOTE(review): only this branch stores qc on self; the None branch
    # leaves self.qc unset — confirm whether that asymmetry is intended.
    self.qc = qc

def objective_function(params):
    """
    closure representing the functional

    :param params: (ndarray) vector of parameters for generating the
                   the function of the functional.
    :return: (float) expectation value
    """
    pyquil_prog = variational_state_evolve(params)
    mean_value = self.expectation(pyquil_prog, hamiltonian, samples, qc)
    self._current_expectation = mean_value  # store for printing
    return mean_value

def print_current_iter(iter_vars):
    # Per-iteration progress callback; also accumulates history when
    # return_all is requested.
    self._disp_fun("\tParameters: {} ".format(iter_vars))
    if jacobian is not None:
        grad = jacobian(iter_vars)
        self._disp_fun("\tGrad-L1-Norm: {}".format(np.max(np.abs(grad))))
        self._disp_fun("\tGrad-L2-Norm: {} ".format(np.linalg.norm(grad)))

    self._disp_fun("\tE => {}".format(self._current_expectation))
    if return_all:
        iteration_params.append(iter_vars)
        expectation_vals.append(self._current_expectation)

# using self.minimizer
# Only wire in kwargs the chosen minimizer actually accepts.
arguments = funcsigs.signature(self.minimizer).parameters.keys()

if disp is not None and 'callback' in arguments:
    self.minimizer_kwargs['callback'] = print_current_iter

args = [objective_function, initial_params]
args.extend(self.minimizer_args)
if 'jac' in arguments:
    self.minimizer_kwargs['jac'] = jacobian

result = self.minimizer(*args, **self.minimizer_kwargs)

if hasattr(result, 'status'):
    if result.status != 0:
        self._disp_fun("Classical optimization exited with an error index: %i" % result.status)

results = OptResults()
# scipy-style results expose .x/.fun; bare-array minimizers return x only.
if hasattr(result, 'x'):
    results.x = result.x
    results.fun = result.fun
else:
    results.x = result

if return_all:
    results.iteration_params = iteration_params
    results.expectation_vals = expectation_vals
return results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_path_from_parent(self, parent): """ Return a list of PathInfos containing the path from the parent model to the current model, or an empty list if parent is not a parent of the current model. """
# Prefer Django's native implementation when this Options version has it.
if hasattr(self, 'get_path_from_parent'):
    return self.get_path_from_parent(parent)
if self.model is parent:
    return []
model = self.concrete_model
# Get a reversed base chain including both the current and parent
# models.
chain = model._meta.get_base_chain(parent) or []
chain.reverse()
chain.append(model)
# Construct a list of the PathInfos between models in chain.
path = []
for i, ancestor in enumerate(chain[:-1]):
    child = chain[i + 1]
    link = child._meta.get_ancestor_link(ancestor)
    path.extend(link.get_reverse_path_info())
return path
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def patience_sort(xs):
    '''Patience sort an iterable, xs.

    This function generates a series of pairs (x, pile), where "pile"
    is the 0-based index of the pile "x" should be placed on top of.
    Elements of "xs" must be less-than comparable.
    '''
    # tops[i] is the current top card of pile i; it stays sorted, so a
    # binary search finds the leftmost pile whose top is >= the new card.
    tops = []
    for card in xs:
        destination = bisect.bisect_left(tops, card)
        if destination < len(tops):
            tops[destination] = card
        else:
            # Card is larger than every pile top: start a new pile.
            tops.append(card)
        yield card, destination
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def longest_monotonic_subseq_length(xs):
    '''Return the length of the longest monotonic subsequence of xs, second
    return value is the difference between increasing and decreasing lengths.

    >>> longest_monotonic_subseq_length((4, 5, 1, 2, 3))
    (3, 1)
    >>> longest_monotonic_subseq_length((1, 2, 3, 5, 4))
    (4, 2)
    >>> longest_monotonic_subseq_length((1, 2, 1))
    (2, 0)
    '''
    # Compute both directions once, then report the winner and the signed
    # gap between them (positive when increasing dominates).
    inc_len = longest_increasing_subseq_length(xs)
    dec_len = longest_decreasing_subseq_length(xs)
    return max(inc_len, dec_len), inc_len - dec_len
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def longest_increasing_subsequence(xs):
    '''Return a longest increasing subsequence of xs.

    (Note that there may be more than one such subsequence.)

    >>> longest_increasing_subsequence(range(3))
    [0, 1, 2]
    >>> longest_increasing_subsequence([3, 1, 2, 0])
    [1, 2]
    '''
    # Patience sort xs, recording (value, backlink) pairs per pile. The
    # backlink indexes the element that was on top of the previous pile
    # when the value arrived — by construction a smaller element.
    piles = [[]]  # piles[0] is a sentinel so pile-1 backlinks are valid
    for value, pile_ix in patience_sort(xs):
        dest = pile_ix + 1
        if dest == len(piles):
            piles.append([])
        piles[dest].append((value, len(piles[dest - 1]) - 1))

    # Walk the backlinks from the last pile to recover one LIS, then
    # reverse since the walk produces it back-to-front.
    lis = []
    link = 0
    for pile_no in range(len(piles) - 1, 0, -1):
        value, link = piles[pile_no][link]
        lis.append(value)
    lis.reverse()
    return lis
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def backtracking(a, L, bestsofar): """ Start with the heaviest weight and emit index """
# Start from the entry of L with the heaviest accumulated weight, then
# follow the predecessor chain stored in bestsofar (each entry is a
# (weight, prev_index) pair) until the -1 sentinel terminates the walk.
w, j = max(L.items())
while j != -1:
    yield j
    w, j = bestsofar[j]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mappability(args): """ %prog mappability reference.fasta Generate 50mer mappability for reference genome. Commands are based on gem mapper. See instructions: <https://github.com/xuefzhao/Reference.Mappability> """
p = OptionParser(mappability.__doc__)
p.add_option("--mer", default=50, type="int", help="User mer size")
p.set_cpus()
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

ref, = args
K = opts.mer
pf = ref.rsplit(".", 1)[0]
mm = MakeManager()

# Pipeline (as a Makefile via MakeManager):
# gem-indexer -> gem-mappability -> gem-2-wig -> wigToBigWig ->
# bigWigToBedGraph -> filterbedgraph
gem = pf + ".gem"
cmd = "gem-indexer -i {} -o {}".format(ref, pf)
mm.add(ref, gem, cmd)

mer = pf + ".{}mer".format(K)
mapb = mer + ".mappability"
cmd = "gem-mappability -I {} -l {} -o {} -T {}".\
    format(gem, K, mer, opts.cpus)
mm.add(gem, mapb, cmd)

wig = mer + ".wig"
cmd = "gem-2-wig -I {} -i {} -o {}".format(gem, mapb, mer)
mm.add(mapb, wig, cmd)

bw = mer + ".bw"
cmd = "wigToBigWig {} {}.sizes {}".format(wig, mer, bw)
mm.add(wig, bw, cmd)

bg = mer + ".bedGraph"
cmd = "bigWigToBedGraph {} {}".format(bw, bg)
mm.add(bw, bg, cmd)

# Keep only regions with mappability score 1 (uniquely mappable).
merged = mer + ".filtered-1.merge.bed"
cmd = "python -m jcvi.formats.bed filterbedgraph {} 1".format(bg)
mm.add(bg, merged, cmd)

mm.write()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def freq(args): """ %prog freq fastafile bamfile Call SNP frequencies and generate GFF file. """
p = OptionParser(freq.__doc__)
p.add_option("--mindepth", default=3, type="int",
             help="Minimum depth [default: %default]")
p.add_option("--minqual", default=20, type="int",
             help="Minimum quality [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

fastafile, bamfile = args
# Shell pipeline: freebayes (pooled calling) -> vcffilter (quality) ->
# vcfkeepinfo (keep only AO/RO/TYPE INFO fields).
cmd = "freebayes -f {0} --pooled-continuous {1}".format(fastafile, bamfile)
cmd += " -F 0 -C {0}".format(opts.mindepth)
cmd += ' | vcffilter -f "QUAL > {0}"'.format(opts.minqual)
cmd += " | vcfkeepinfo - AO RO TYPE"
sh(cmd, outfile=opts.outfile)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def frommaf(args): """ %prog frommaf maffile Convert to four-column tabular format from MAF. """
p = OptionParser(frommaf.__doc__) p.add_option("--validate", help="Validate coordinates against FASTA [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) maf, = args snpfile = maf.rsplit(".", 1)[0] + ".vcf" fp = open(maf) fw = open(snpfile, "w") total = 0 id = "." qual = 20 filter = "PASS" info = "DP=20" print("##fileformat=VCFv4.0", file=fw) print("#CHROM POS ID REF ALT QUAL FILTER INFO".replace(" ", "\t"), file=fw) for row in fp: atoms = row.split() c, pos, ref, alt = atoms[:4] try: c = int(c) except: continue c = "chr{0:02d}".format(c) pos = int(pos) print("\t".join(str(x) for x in \ (c, pos, id, ref, alt, qual, filter, info)), file=fw) total += 1 fw.close() validate = opts.validate if not validate: return from jcvi.utils.cbook import percentage f = Fasta(validate) fp = open(snpfile) nsnps = 0 for row in fp: if row[0] == '#': continue c, pos, id, ref, alt, qual, filter, info = row.split("\t") pos = int(pos) feat = dict(chr=c, start=pos, stop=pos) s = f.sequence(feat) s = str(s) assert s == ref, "Validation error: {0} is {1} (expect: {2})".\ format(feat, s, ref) nsnps += 1 if nsnps % 50000 == 0: logging.debug("SNPs parsed: {0}".format(percentage(nsnps, total))) logging.debug("A total of {0} SNPs validated and written to `{1}`.".\ format(nsnps, snpfile))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def libs(args): """ %prog libs libfile Get list of lib_ids to be run by pull(). The SQL commands: select library.lib_id, library.name from library join bac on library.bac_id=bac.id where bac.lib_name="Medicago"; select seq_name from sequence where seq_name like 'MBE%' and trash is null; """
p = OptionParser(libs.__doc__)
p.set_db_opts(dbname="track", credentials=None)
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

libfile, = args

sqlcmd = "select library.lib_id, library.name, bac.gb# from library join bac on " + \
         "library.bac_id=bac.id where bac.lib_name='Medicago'"
cur = connect(opts.dbname)
results = fetchall(cur, sqlcmd)

# Write one "lib_id|name|gb" line per library.
fw = open(libfile, "w")
for lib_id, name, gb in results:
    # NOTE(review): str.translate(None, "\n") is the Python 2 signature;
    # under Python 3 this raises TypeError — confirm target interpreter.
    name = name.translate(None, "\n")
    if not gb:
        gb = "None"
    print("|".join((lib_id, name, gb)), file=fw)
fw.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pull(args): """ %prog pull libfile Pull the sequences using the first column in the libfile. """
p = OptionParser(pull.__doc__)
p.set_db_opts(dbname="mtg2", credentials=None)
p.add_option("--frag", default=False, action="store_true",
             help="The command to pull sequences from db [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

libfile, = args
dbname = opts.dbname
frag = opts.frag
fp = open(libfile)
hostname, username, password = get_profile()
for row in fp:
    lib_id, name = row.split("|", 1)
    sqlfile = lib_id + ".sql"

    # Write the per-library SQL query once; reuse on subsequent runs.
    if not op.exists(sqlfile):
        fw = open(sqlfile, "w")
        print("select seq_name from sequence where seq_name like" + \
              " '{0}%' and trash is null".format(lib_id), file=fw)
        fw.close()

    # pullfrag needs explicit host/credentials; pullseq does not.
    if frag:
        cmd = "pullfrag -D {0} -n {1}.sql -o {1} -q -S {2}".format(dbname, lib_id, hostname)
        cmd += " -U {0} -P {1}".format(username, password)
    else:
        cmd = "pullseq -D {0} -n {1}.sql -o {1} -q".format(dbname, lib_id)
    sh(cmd)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_record(fp, first_line=None): """ Read a record from a file of AMOS messages On success returns a Message object On end of file raises EOFError """
if first_line is None:
    first_line = fp.readline()
if not first_line:
    # Clean end-of-file between messages.
    raise EOFError()
match = _START.match(first_line)
if not match:
    raise Exception('Bad start of message', first_line)
type = match.group(1)
message = Message(type)
while True:
    row = fp.readline()
    # Multiline field: "key:" on its own line, terminated by a "." line.
    match = _MULTILINE_FIELD.match(row)
    if match:
        key = match.group(1)
        val = ""
        while row:
            pos = fp.tell()
            row = fp.readline()
            # NOTE(review): row[0] raises IndexError on a truncated file
            # (empty readline mid-field) — confirm inputs are well-formed.
            if row[0] in '.':
                break
            elif row[0] in '{}':
                fp.seek(pos)  # put the line back
                break
            val += row
        message.contents.append((key, val, True))
        continue
    # Simple "key:value" field on one line.
    match = _FIELD.match(row)
    if match:
        key, val = match.group(1), match.group(2)
        message.contents.append((key, val, False))
        continue
    # Nested message: recurse, passing the already-consumed start line.
    match = _START.match(row)
    if match:
        message.append(read_record(fp, row))
        continue
    if row[0] == '}':
        # End of this message.
        break
    raise Exception('Bad line', row)
return message
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filter(args): """ %prog filter frgfile idsfile Removes the reads from frgfile that are indicated as duplicates in the clstrfile (generated by CD-HIT-454). `idsfile` includes a set of names to include in the filtered frgfile. See apps.cdhit.ids(). """
p = OptionParser(filter.__doc__)
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

frgfile, idsfile = args
assert frgfile.endswith(".frg")
fp = open(idsfile)
allowed = set(x.strip() for x in fp)
logging.debug("A total of {0} allowed ids loaded.".format(len(allowed)))

newfrgfile = frgfile.replace(".frg", ".filtered.frg")
fp = open(frgfile)
fw = open(newfrgfile, "w")

nfrags, discarded_frags = 0, 0
nmates, discarded_mates = 0, 0
for rec in iter_records(fp):
    if rec.type == "FRG":
        readname = rec.get_field("acc")
        # Strip the /a or /b mate suffix so ids match the allowed set.
        readname = readname.rstrip("ab")
        nfrags += 1
        if readname not in allowed:
            discarded_frags += 1
            continue
    if rec.type == "LKG":
        readname = rec.get_field("frg")
        readname = readname.rstrip("ab")
        nmates += 1
        if readname not in allowed:
            discarded_mates += 1
            continue
    print(rec, file=fw)

# Print out a summary
survived_frags = nfrags - discarded_frags
survived_mates = nmates - discarded_mates
print("Survived fragments: {0}".\
    format(percentage(survived_frags, nfrags)), file=sys.stderr)
print("Survived mates: {0}".\
    format(percentage(survived_mates, nmates)), file=sys.stderr)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def frg(args): """ %prog frg frgfile Extract FASTA sequences from frg reads. """
p = OptionParser(frg.__doc__)
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(p.print_help())

frgfile, = args
fastafile = frgfile.rsplit(".", 1)[0] + ".fasta"
fp = open(frgfile)
fw = open(fastafile, "w")

# Emit one FASTA record per FRG message (acc => id, seq => sequence).
for rec in iter_records(fp):
    if rec.type != "FRG":
        continue
    id = rec.get_field("acc")
    seq = rec.get_field("seq")
    s = SeqRecord(Seq(seq), id=id, description="")
    SeqIO.write([s], fw, "fasta")

fw.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def asm(args): """ %prog asm asmfile Extract FASTA sequences from asm reads. """
p = OptionParser(asm.__doc__)
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(p.print_help())

asmfile, = args
prefix = asmfile.rsplit(".", 1)[0]
ctgfastafile = prefix + ".ctg.fasta"
scffastafile = prefix + ".scf.fasta"
fp = open(asmfile)
ctgfw = open(ctgfastafile, "w")
scffw = open(scffastafile, "w")

for rec in iter_records(fp):
    # CCO messages are contigs, SCF messages are scaffolds; everything
    # else is ignored.
    type = rec.type
    if type == "CCO":
        fw = ctgfw
        pp = "ctg"
    elif type == "SCF":
        fw = scffw
        pp = "scf"
    else:
        continue

    # acc looks like "(id,...)": strip parens, keep the first token.
    # NOTE(review): str.translate(None, ...) is the Python 2 signature —
    # confirm target interpreter.
    id = rec.get_field("acc")
    id = id.translate(None, "()").split(",")[0]
    # Remove gap dashes from the consensus sequence.
    seq = rec.get_field("cns").translate(None, "-")
    s = SeqRecord(Seq(seq), id=pp + id, description="")
    SeqIO.write([s], fw, "fasta")
    fw.flush()

# NOTE(review): only the last-used handle is closed here; ctgfw/scffw may
# remain open — confirm whether both should be closed explicitly.
fw.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def count(args): """ %prog count frgfile Count each type of messages """
p = OptionParser(count.__doc__)
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(p.print_help())

frgfile, = args
fp = open(frgfile)
# Tally message types (FRG, LKG, ...) across the whole file.
counts = defaultdict(int)
for rec in iter_records(fp):
    counts[rec.type] += 1

for type, cnt in sorted(counts.items()):
    print('{0}: {1}'.format(type, cnt), file=sys.stderr)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def prepare(args): """ %prog prepare countfolder families Parse list of count files and group per family into families folder. """
p = OptionParser(prepare.__doc__)
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

counts, families = args
countfiles = glob(op.join(counts, "*.count"))
countsdb = defaultdict(list)
for c in countfiles:
    rs = RiceSample(c)
    countsdb[(rs.tissue, rs.ind)].append(rs)

# Merge duplicates - data sequenced in different batches
key = lambda x: (x.label, x.rep)
for (tissue, ind), rs in sorted(countsdb.items()):
    rs.sort(key=key)
    nrs = len(rs)
    # range() here (was Python-2-only xrange; the file otherwise targets
    # Python 3).
    for i in range(nrs):
        ri = rs[i]
        if not ri.working:
            continue
        for j in range(i + 1, nrs):
            rj = rs[j]
            if key(ri) != key(rj):
                continue
            # Fold the later batch into the earlier one and retire it.
            ri.merge(rj)
            rj.working = False
    countsdb[(tissue, ind)] = [x for x in rs if x.working]

# Group into families
# Create the caller-specified output folder (was hard-coded "families",
# ignoring the `families` argument that all writes below use).
mkdir(families)
for (tissue, ind), r in sorted(countsdb.items()):
    r = list(r)
    if r[0].label != "F1":
        continue
    # Assemble parent-1, parent-2, and F1 samples into one family table.
    P1, P2 = r[0].P1, r[0].P2
    P1, P2 = countsdb[(tissue, P1)], countsdb[(tissue, P2)]
    rs = P1 + P2 + r
    # Group codes: 1 = parent 1, 2 = parent 2, 3 = F1.
    groups = [1] * len(P1) + [2] * len(P2) + [3] * len(r)
    assert len(rs) == len(groups)
    outfile = "-".join((tissue, ind))
    merge_counts(rs, op.join(families, outfile))
    groupsfile = outfile + ".groups"
    fw = open(op.join(families, groupsfile), "w")
    print(",".join(str(x) for x in groups), file=fw)
    fw.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def outlier_cutoff(a, threshold=3.5): """ Iglewicz and Hoaglin's robust, returns the cutoff values - lower bound and upper bound. """
# Iglewicz & Hoaglin modified z-score: a point is an outlier when
# 0.6745 * |x - median| / MAD exceeds `threshold`. Inverting that test
# gives symmetric cutoff bounds around the median. The constant 0.67449
# is approximately Phi^-1(0.75), which rescales MAD to a sigma estimate
# for normal data.
A = np.array(a, dtype=float)
M = np.median(A)
D = np.absolute(A - M)
MAD = np.median(D)
C = threshold / .67449 * MAD
return M - C, M + C
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bed(args): """ %prog bed genes.ids Get gene bed from phytozome. `genes.ids` contains the list of gene you want to pull from Phytozome. Write output to .bed file. """
p = OptionParser(bed.__doc__)
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

idsfile, = args
ids = set(x.strip() for x in open(idsfile))
data = get_bed_from_phytozome(list(ids))

pf = idsfile.rsplit(".", 1)[0]
bedfile = pf + ".bed"
fw = open(bedfile, "w")
for i, row in enumerate(data):
    row = row.strip()
    if row == "":
        continue

    print(row, file=fw)

# NOTE(review): if `data` is empty, `i` is never bound and this line
# raises NameError — confirm Phytozome always returns at least one row.
logging.debug("A total of {0} records written to `{1}`.".format(i + 1, bedfile))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bed(args): """ %prog bed binfile fastafile Write bed files where the bases have at least certain depth. """
p = OptionParser(bed.__doc__)
p.add_option("-o", dest="output", default="stdout",
             help="Output file name [default: %default]")
p.add_option("--cutoff", dest="cutoff", default=10, type="int",
             help="Minimum read depth to report intervals [default: %default]")
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

binfile, fastafile = args
fw = must_open(opts.output, "w")
cutoff = opts.cutoff
assert cutoff >= 0, "Need non-negative cutoff"

b = BinFile(binfile)
ar = b.array

fastasize, sizes, offsets = get_offsets(fastafile)
s = Sizes(fastafile)
for ctg, ctglen in s.iter_sizes():
    offset = offsets[ctg]
    subarray = ar[offset:offset + ctglen]
    # Partition consecutive positions into runs by whether the depth
    # passes the cutoff; only the passing runs are reported.
    key = lambda x: x[1] >= cutoff
    for tf, array_elements in groupby(enumerate(subarray), key=key):
        array_elements = list(array_elements)
        if not tf:
            continue

        # 0-based system => 1-based system
        start = array_elements[0][0] + 1
        end = array_elements[-1][0] + 1
        mean_depth = sum([x[1] for x in array_elements]) / \
            len(array_elements)
        mean_depth = int(mean_depth)

        name = "na"
        # BED output is half-open, hence start - 1.
        print("\t".join(str(x) for x in (ctg, \
            start - 1, end, name, mean_depth)), file=fw)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def query(args): """ %prog query binfile fastafile ctgID baseID Get the depth at a particular base. """
p = OptionParser(query.__doc__)
opts, args = p.parse_args(args)

if len(args) != 4:
    sys.exit(not p.print_help())

binfile, fastafile, ctgID, baseID = args
b = BinFile(binfile, fastafile)
ar = b.mmarray
fastasize, sizes, offsets = get_offsets(fastafile)
# Convert the 1-based base position into the 0-based offset within the
# flat genome-wide depth array.
oi = offsets[ctgID] + int(baseID) - 1
print("\t".join((ctgID, baseID, str(ar[oi]))))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def count(args): """ %prog count t.coveragePerBase fastafile Serialize the genomeCoverage results. The coordinate system of the count array will be based on the fastafile. """
p = OptionParser(count.__doc__)
opts, args = p.parse_args(args)

if len(args) != 2:
    sys.exit(not p.print_help())

coveragefile, fastafile = args

# Refuse to clobber an existing serialized array.
countsfile = coveragefile.split(".")[0] + ".bin"
if op.exists(countsfile):
    logging.error("`{0}` file exists. Remove before proceed."\
                  .format(countsfile))
    return

fastasize, sizes, offsets = get_offsets(fastafile)
logging.debug("Initialize array of uint8 with size {0}".format(fastasize))
# uint8 caps per-base depth at 255; one cell per base of the genome.
ar = np.zeros(fastasize, dtype=np.uint8)

update_array(ar, coveragefile, sizes, offsets)

ar.tofile(countsfile)
logging.debug("Array written to `{0}`".format(countsfile))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def edges_to_path(edges): """ Connect edges and return a path. """
if not edges: return None G = edges_to_graph(edges) path = nx.topological_sort(G) return path
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def max_sum(a): """ For an input array a, output the range that gives the largest sum (17, 0, 2) (10, 2, 2) (19, 4, 9) """
max_sum, max_start_index, max_end_index = -Infinity, 0, 0 current_max_sum = 0 current_start_index = 0 for current_end_index, x in enumerate(a): current_max_sum += x if current_max_sum > max_sum: max_sum, max_start_index, max_end_index = current_max_sum, \ current_start_index, current_end_index if current_max_sum < 0: current_max_sum = 0 current_start_index = current_end_index + 1 return max_sum, max_start_index, max_end_index
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def silicosoma(args): """ %prog silicosoma in.silico > out.soma Convert .silico to .soma file. Format of .silico A text file containing in-silico digested contigs. This file contains pairs of lines. The first line in each pair constains an identifier, this contig length in bp, and the number of restriction sites, separated by white space. The second line contains a white space delimited list of the restriction site positions. Format of .soma Each line of the text file contains two decimal numbers: The size of the fragment and the standard deviation (both in kb), separated by white space. The standard deviation is ignored. """
p = OptionParser(silicosoma.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)

if len(args) != 1:
    sys.exit(not p.print_help())

silicofile, = args
fp = must_open(silicofile)
fw = must_open(opts.outfile, "w")
# Skip the header (identifier) line; the second line of the pair holds
# the restriction-site positions.
next(fp)
# next(fp) replaces the Python-2-only fp.next() call (the previous
# statement already used the Python 3 builtin).
positions = [int(x) for x in next(fp).split()]
for a, b in pairwise(positions):
    # Positions must be non-decreasing; emit each fragment size in kb
    # with a dummy standard deviation of 0.
    assert a <= b
    fragsize = int(round((b - a) / 1000.))  # kb
    if fragsize:
        print(fragsize, 0, file=fw)
def condense(args):
    """
    %prog condense OM.bed

    Merge split alignments in OM bed.
    """
    from itertools import groupby
    from jcvi.assembly.patch import merge_ranges

    p = OptionParser(condense.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    bedfile, = args
    bed = Bed(bedfile, sorted=False)
    # Consecutive records that share identical coordinates are collapsed
    # into a single representative record whose accn is rewritten to the
    # merged "seqid:start-end" range.
    coords = lambda rec: (rec.seqid, rec.start, rec.end)
    for _, members in groupby(bed, key=coords):
        members = list(members)
        representative = members[0]
        merged_seqid, merged_start, merged_end, strand = merge_ranges(members)
        representative.accn = "{0}:{1}-{2}".format(
            merged_seqid, merged_start, merged_end)
        print(representative)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def chimera(args):
    """ %prog chimera bedfile Scan the bed file to break scaffolds that multi-maps. """

    p = OptionParser(chimera.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    bedfile, = args
    bed = Bed(bedfile)
    # Keep only the best-scoring, non-overlapping alignments (select_bed).
    selected = select_bed(bed)
    mapped = defaultdict(set)  # scaffold => chr
    # Breakpoint junctions are written to this fixed-name side file.
    chimerabed = "chimera.bed"
    fw = open(chimerabed, "w")
    # b.accn is assumed to be a range string ("scaffold:start-end") that
    # range_parse can decompose; record every chromosome (b.seqid) each
    # scaffold aligns to.
    for b in selected:
        scf = range_parse(b.accn).seqid
        chr = b.seqid
        mapped[scf].add(chr)

    nchimera = 0
    # A scaffold whose alignments land on more than one chromosome is a
    # chimera candidate; report it and locate its internal breakpoints.
    for s, chrs in sorted(mapped.items()):
        if len(chrs) == 1:
            continue

        print("=" * 80, file=sys.stderr)
        print("{0} mapped to multiple locations: {1}".\
                format(s, ",".join(sorted(chrs))), file=sys.stderr)
        # Collect all alignment ranges belonging to this scaffold.
        ranges = []
        for b in selected:
            rr = range_parse(b.accn)
            scf = rr.seqid
            if scf == s:
                print(b, file=sys.stderr)
                ranges.append(rr)

        # Identify breakpoints
        ranges.sort(key=lambda x: (x.seqid, x.start, x.end))
        # A breakpoint is the gap between two consecutive alignments on the
        # scaffold; coordinates are swapped if the pieces overlap.
        for a, b in pairwise(ranges):
            seqid = a.seqid
            if seqid != b.seqid:
                continue

            start, end = a.end, b.start
            if start > end:
                start, end = end, start

            chimeraline = "\t".join(str(x) for x in (seqid, start, end))
            print(chimeraline, file=fw)
            print(chimeraline, file=sys.stderr)
            nchimera += 1

    fw.close()
    logging.debug("A total of {0} junctions written to `{1}`.".\
                format(nchimera, chimerabed))
def select_bed(bed):
    """
    Pick a non-overlapping subset of the bed records, preferring
    high-scoring blocks over low-scoring alignments when they conflict.
    """
    candidates = []
    for idx, feature in enumerate(bed):
        candidates.append(Range(feature.seqid, feature.start, feature.end,
                                float(feature.score), idx))
    chained, _score = range_chain(candidates)
    return [bed[r.id] for r in chained]