diff --git a/.github/workflows/python_test.yml b/.github/workflows/python_test.yml index fdf6921889..4da9939b17 100644 --- a/.github/workflows/python_test.yml +++ b/.github/workflows/python_test.yml @@ -11,7 +11,7 @@ jobs: max-parallel: 4 fail-fast: false matrix: - python-version: ['3.6', '3.7', '3.8'] + python-version: ['3.7', '3.8', '3.9'] steps: - uses: actions/checkout@v1 @@ -56,9 +56,9 @@ jobs: - name: Test with pytest run: | py.test pycqed/tests --cov=pycqed --cov-report xml --cov-report html --cov-config=.coveragerc - - name: Upload code coverage report - run: | - bash <(curl -Ls https://coverage.codacy.com/get.sh) report -r coverage.xml - codecov - env: # set secrets as environmental variables - CODACY_PROJECT_TOKEN: ${{ secrets.CODACY_PROJECT_TOKEN }} +# - name: Upload code coverage report +# run: | +# bash <(curl -Ls https://coverage.codacy.com/get.sh) report -r coverage.xml +# codecov +# env: # set secrets as environmental variables +# CODACY_PROJECT_TOKEN: ${{ secrets.CODACY_PROJECT_TOKEN }} diff --git a/pycqed/instrument_drivers/meta_instrument/HAL/HAL_ShimMQ.py b/pycqed/instrument_drivers/meta_instrument/HAL/HAL_ShimMQ.py index 60d9acac9f..dd7811d934 100644 --- a/pycqed/instrument_drivers/meta_instrument/HAL/HAL_ShimMQ.py +++ b/pycqed/instrument_drivers/meta_instrument/HAL/HAL_ShimMQ.py @@ -10,6 +10,7 @@ import warnings from collections import OrderedDict import numpy as np +from typing import List from deprecated import deprecated from pycqed.measurement import detector_functions as det @@ -211,7 +212,7 @@ def prepare_readout(self, qubits, reduced: bool = False): def prepare_for_timedomain( self, - qubits: list, + qubits: List[str], reduced: bool = False, bypass_flux: bool = False, prepare_for_readout: bool = True @@ -661,15 +662,17 @@ def _set_dio_map(self, dio_map_dict): # private functions: prepare ########################################################################## - def _prep_ro_sources(self, qubits): + def _prep_ro_sources(self, qubits: 
List[str]): """ turn on and configure the RO LO's of all qubits to be measured and update the modulation frequency of all qubits. """ + log.info(f"preparing ro sources for qubits {qubits}") # FIXME: This device object works under the assumption that a single LO # is used to drive all readout lines. - LO = self.find_instrument(qubits[0]).instr_LO_ro.get_instr() - RO_lutman = self.find_instrument(qubits[0]).instr_LutMan_RO.get_instr() + qb = self.find_instrument(qubits[0]) + LO = qb.instr_LO_ro.get_instr() + RO_lutman = qb.instr_LutMan_RO.get_instr() LO.frequency.set(RO_lutman.LO_freq()) LO.power(self.ro_pow_LO()) LO.on() @@ -690,7 +693,7 @@ def _prep_ro_sources(self, qubits): LO_q.on() #raise ValueError("Expect a single LO to drive all feedlines") - def _prep_ro_assign_weights(self, qubits): + def _prep_ro_assign_weights(self, qubits: List[str]): """ Assign acquisition weight channels to the different qubits. @@ -755,7 +758,7 @@ def _prep_ro_assign_weights(self, qubits): return acq_ch_map # FIXME: align with HAL_ShimSQ::_prep_ro_integration_weights - def _prep_ro_integration_weights(self, qubits): + def _prep_ro_integration_weights(self, qubits: List[str]): """ Set the acquisition integration weights on each channel. @@ -823,7 +826,7 @@ def _prep_ro_integration_weights(self, qubits): raise NotImplementedError('ro_acq_weight_type "{}" not supported'.format(self.ro_acq_weight_type())) # FIXME: align with HAL_ShimSQ::_prep_ro_pulses - def _prep_ro_pulses(self, qubits): + def _prep_ro_pulses(self, qubits: List[str]): """ Configure the ro lutmans. 
diff --git a/pycqed/instrument_drivers/meta_instrument/HAL_Device.py b/pycqed/instrument_drivers/meta_instrument/HAL_Device.py index 866f15a505..281b737bcd 100644 --- a/pycqed/instrument_drivers/meta_instrument/HAL_Device.py +++ b/pycqed/instrument_drivers/meta_instrument/HAL_Device.py @@ -10,15 +10,14 @@ import logging import adaptive import networkx as nx -import datetime import multiprocessing from importlib import reload from typing import List, Union, Optional, Tuple from deprecated import deprecated from pycqed.instrument_drivers.meta_instrument.HAL.HAL_ShimMQ import HAL_ShimMQ +from pycqed.measurement.openql_experiments.clifford_rb_oql import run_tasks -from pycqed.analysis import multiplexed_RO_analysis as mra from pycqed.measurement import detector_functions as det reload(det) @@ -35,12 +34,12 @@ #from pycqed.instrument_drivers.physical_instruments.QuTech_AWG_Module import QuTech_AWG_Module from pycqed.measurement.measurement_control import MeasurementControl -from qcodes.instrument.parameter import ManualParameter, Parameter +from qcodes.instrument.parameter import ManualParameter log = logging.getLogger(__name__) -try: +try: # FIXME: why? from pycqed.measurement.openql_experiments import single_qubit_oql as sqo import pycqed.measurement.openql_experiments.multi_qubit_oql as mqo from pycqed.measurement.openql_experiments import clifford_rb_oql as cl_oql @@ -599,7 +598,7 @@ def measure_parity_check_flux_dance( else: log.warning(f"Target qubit {target_qubits[0]} not X or Z!") - # if ramsey_qubits is given as list of qubit names, + # if ramsey_qubits is given as list of qubit names, # only those will be used and converted to qubit numbers. 
# if ramsey_qubits is given as boolean, # all ancillas that are not part of the parity check will be ramseyd @@ -611,18 +610,18 @@ def measure_parity_check_flux_dance( log.warning(f"Ramsey qubit {qb} already given as ancilla qubit!") Q_idxs_ramsey += [self.find_instrument(qb).cfg_qubit_nr()] - Q_idxs_target = [] + Q_idxs_target = [] for i,target_qubit in enumerate(target_qubits): log.info(f"Parity {target_qubit} - {control_qubits}, flux dance steps {flux_dance_steps}") Q_idxs_target += [self.find_instrument(target_qubit).cfg_qubit_nr()] # filter control qubits based on control_cases_to_measure, # then the cases will be created based on the filtered control qubits - Q_idxs_control = [] + Q_idxs_control = [] if not control_cases_to_measure: # if cases are not given, measure all cases for all control qubits - control_qubits_by_case = control_qubits - Q_idxs_control += [self.find_instrument(Q).cfg_qubit_nr() for Q in control_qubits_by_case] + control_qubits_by_case = control_qubits + Q_idxs_control += [self.find_instrument(Q).cfg_qubit_nr() for Q in control_qubits_by_case] cases = ['{:0{}b}'.format(i, len(Q_idxs_control)) for i in range(2**len(Q_idxs_control))] else: # if cases are given, prepare and measure only them @@ -633,14 +632,14 @@ def measure_parity_check_flux_dance( control_qubits_by_case += [control_qubits[i] for i,c in enumerate(case) \ if c == '1' and control_qubits[i] not in control_qubits_by_case] #control_qubits_by_case += [control_qubits[i] for i,c in enumerate(case) if c == '1'] - + # sort selected control qubits according to readout (feedline) order # qb_ro_order = np.sum([ list(self._acq_ch_map[key].keys()) for key in self._acq_ch_map.keys()], dtype=object) # dqb_ro_order = np.array(qb_ro_order, dtype=str)[[qb[0] == 'D' for qb in qb_ro_order]] control_qubits_by_case = [x for x,_ in sorted(zip(control_qubits_by_case, control_qubits))] - + Q_idxs_control += [self.find_instrument(Q).cfg_qubit_nr() for Q in control_qubits_by_case] - cases = 
control_cases_to_measure + cases = control_cases_to_measure # for separate preparation of parking qubits in 1, used to study parking if parking_qubits: @@ -659,9 +658,9 @@ def measure_parity_check_flux_dance( # MW preparation for qb in all_qubits: mw_lutman = self.find_instrument(qb).instr_LutMan_MW.get_instr() - # check the lutman of the target, control and parking qubits for cw_27, + # check the lutman of the target, control and parking qubits for cw_27, # which is needed for refocusing, case preparation, and preparation in 1 (respectively) - # and prepare if necessary + # and prepare if necessary xm180_dict = {"name": "rXm180", "theta": -180, "phi": 0, "type": "ge"} if mw_lutman.LutMap().get(27) != xm180_dict: log.warning(f"{mw_lutman.name} does not have refocusing pulse, overriding `cw_27` ...") @@ -680,14 +679,14 @@ def measure_parity_check_flux_dance( if prepare_for_timedomain: # Take care of readout order (by feedline/UHF) if self.qubits_by_feedline(): - all_qubits = sorted(all_qubits, + all_qubits = sorted(all_qubits, key=lambda x: [i for i, feedline in enumerate(self.qubits_by_feedline()) \ if x in feedline]) log.info(f"Sorted qubits for readout preparation: {all_qubits}") else: log.warning("Qubit order by feedline in `self.qubits_by_feedline()` parameter is not set, " + "readout will be prepared in order of given qubits which can lead to errors!") - + self.prepare_for_timedomain(qubits=all_qubits) # These are hardcoded angles in the mw_lutman for the AWG8 @@ -696,7 +695,7 @@ def measure_parity_check_flux_dance( # prepare flux codeword list according to given step numbers # will be programmed in order of the list, but scheduled in parallel (if possible) - + if refocusing: flux_cw_list = [flux_codeword + '_refocus' + f'_{step}' for step in flux_dance_steps] @@ -746,7 +745,7 @@ def measure_parity_check_flux_dance( return a.result # a = ma2.Parity_Check_Analysis( - # label=label, + # label=label, # target_qubit=target_qubits[0], # extract_only=not plotting, # 
analyze_parity_model=analyze_parity_model @@ -757,7 +756,7 @@ def measure_parity_check_flux_dance( # if analyze_parity_model: # model_errors = a.proc_data_dict['quantities_of_interest']['parity_model']['model_errors'] # model_terms = a.proc_data_dict['quantities_of_interest']['parity_model']['model_terms'] - # # this return structure is necessary to use this as a detector function + # # this return structure is necessary to use this as a detector function # # for higher level calibration routines # result = {**result, # 'model_errors': model_errors, @@ -838,20 +837,20 @@ def measure_parity_check_fidelity( all_qubits = target_qubits + control_qubits # MW preparation - Q_idxs_control = [] + Q_idxs_control = [] for qb in control_qubits: - Q_idxs_control += [self.find_instrument(qb).cfg_qubit_nr()] + Q_idxs_control += [self.find_instrument(qb).cfg_qubit_nr()] mw_lutman = self.find_instrument(qb).instr_LutMan_MW.get_instr() - # check the lutman of the target, control and parking qubits for cw_27, + # check the lutman of the target, control and parking qubits for cw_27, # which is needed for refocusing, case preparation, and preparation in 1 (respectively) - # and prepare if necessary + # and prepare if necessary xm180_dict = {"name": "rXm180", "theta": -180, "phi": 0, "type": "ge"} if mw_lutman.LutMap().get(27) != xm180_dict: log.warning(f"{mw_lutman.name} does not have refocusing pulse, overriding `cw_27` ...") mw_lutman.LutMap()[27] = xm180_dict mw_lutman.load_waveform_onto_AWG_lookuptable(27, regenerate_waveforms=True) - Q_idxs_target = [] + Q_idxs_target = [] for i,ancilla in enumerate(target_qubits): log.info(f"Parity check fidelity {ancilla} - {control_qubits}") Q_idxs_target += [self.find_instrument(ancilla).cfg_qubit_nr()] @@ -882,7 +881,7 @@ def measure_parity_check_fidelity( if prepare_for_timedomain: # Take care of readout order (by feedline/UHF) if self.qubits_by_feedline(): - all_qubits = sorted(all_qubits, + all_qubits = sorted(all_qubits, key=lambda x: [i 
for i, feedline in enumerate(self.qubits_by_feedline()) \ if x in feedline]) log.info(f"Sorted qubits for readout preparation: {all_qubits}") @@ -917,10 +916,10 @@ def measure_parity_check_fidelity( s = swf.OpenQL_Sweep(openql_program=p, CCL=self.instr_CC.get_instr()) d = self.get_int_logging_detector( - qubits=target_qubits+control_qubits, + qubits=target_qubits+control_qubits, result_logging_mode=result_logging_mode ) - shots_per_meas = int(np.floor(np.min([shots_per_meas, nr_shots]) / len(cases)) + shots_per_meas = int(np.floor(np.min([shots_per_meas, nr_shots]) / len(cases)) * len(cases) ) d.set_child_attr("nr_shots", shots_per_meas) @@ -957,8 +956,8 @@ def measure_sandia_parity_benchmark(self, # RO preparation (assign res_combinations) ########################################### RO_lms = np.unique([self.find_instrument(q).instr_LutMan_RO() for q in all_qubits]) - qubit_RO_lm = { self.find_instrument(q).cfg_qubit_nr() : - (self.find_instrument(q).name, + qubit_RO_lm = { self.find_instrument(q).cfg_qubit_nr() : + (self.find_instrument(q).name, self.find_instrument(q).instr_LutMan_RO()) for q in all_qubits } main_qubits = [] exception_qubits = [] @@ -1005,7 +1004,7 @@ def measure_sandia_parity_benchmark(self, MC.run(f"Sandia_parity_benchmark_{ancilla_qubit}_{data_qubits[0]}_{data_qubits[1]}_{data_qubits[2]}_{data_qubits[3]}") ma2.pba.Sandia_parity_benchmark(label='Sandia', - ancilla_qubit=ancilla_qubit, + ancilla_qubit=ancilla_qubit, data_qubits=data_qubits, exception_qubits=exception_qubits) @@ -1030,8 +1029,8 @@ def measure_weight4_parity_tomography( # RO preparation (assign res_combinations) ########################################### RO_lms = np.unique([self.find_instrument(q).instr_LutMan_RO() for q in all_qubits]) - qubit_RO_lm = { self.find_instrument(q).cfg_qubit_nr() : - (self.find_instrument(q).name, + qubit_RO_lm = { self.find_instrument(q).cfg_qubit_nr() : + (self.find_instrument(q).name, self.find_instrument(q).instr_LutMan_RO()) for q in all_qubits 
} main_qubits = [] exception_qubits = [] @@ -1062,7 +1061,7 @@ def measure_weight4_parity_tomography( ro_lm = self.find_instrument(lm) ro_lm.resonator_combinations(res_combs[lm]) ro_lm.load_DIO_triggered_sequence_onto_UHFQC() - + p = mqo.Weight_4_parity_tomography( Q_anc=ancilla_idx, Q_D1=data_idxs[0], @@ -1086,7 +1085,7 @@ def measure_weight4_parity_tomography( MC.soft_avg(1) MC.live_plot_enabled(False) MC.set_sweep_function(s) - MC.set_sweep_points(np.arange(int(uhfqc_max_avg/readouts_per_round) + MC.set_sweep_points(np.arange(int(uhfqc_max_avg/readouts_per_round) * readouts_per_round * repetitions)) MC.set_detector_function(d) MC.run(f'Weight_4_parity_tomography_{ancilla_qubit}_{data_qubits}_sim-msmt-{sim_measurement}_{label}') @@ -1119,7 +1118,7 @@ def measure_phase_corrections( phase_updates = dict.fromkeys([pair[0] for pair in pairs]) for i,pair in enumerate(pairs): phase_updates[pair[0]] = a[f"pair_{i}_phi_0_a"] - + if measure_switched_target: a = self.measure_conditional_oscillation( pairs=[pair[::-1] for pair in pairs], @@ -1153,7 +1152,7 @@ def measure_phase_corrections( def measure_two_qubit_tomo_bell( self, - qubits: list, + qubits: List[str], bell_state=0, wait_after_flux=None, analyze=True, @@ -1326,7 +1325,8 @@ def measure_two_qubit_allxy( def measure_two_qubit_allXY_crosstalk( - self, q0: str, + self, + q0: str, q1: str, q1_replace_cases: list = [ None, "i", "rx180", "rx180", "rx180" @@ -1424,7 +1424,8 @@ def measure_residual_ZZ_coupling( def measure_state_tomography( - self, qubits=['D2', 'X'], + self, + qubits: List[str] = ['D2', 'X'], MC: Optional[MeasurementControl] = None, bell_state: float = None, product_state: float = None, @@ -1485,7 +1486,7 @@ def measure_state_tomography( def measure_ssro_multi_qubit( self, - qubits: list, + qubits: List[str], nr_shots_per_case: int = 2 ** 13, # 8192 prepare_for_timedomain: bool = True, result_logging_mode='raw', @@ -1600,7 +1601,7 @@ def measure_ssro_multi_qubit( def measure_ssro_single_qubit( self, - 
qubits: list, + qubits: List[str], q_target: str, nr_shots: int = 2 ** 13, # 8192 prepare_for_timedomain: bool = True, @@ -1730,7 +1731,7 @@ def measure_ssro_single_qubit( def measure_transients( self, - qubits: list, + qubits: List[str], q_target: str, cases: list = ['off', 'on'], MC: Optional[MeasurementControl] = None, @@ -1803,7 +1804,7 @@ def measure_transients( def measure_msmt_induced_dephasing( self, - meas_qubit: str, + meas_qubit: str, target_qubits: list, measurement_time_ns: int, echo_times: list = None, @@ -1822,8 +1823,8 @@ def measure_msmt_induced_dephasing( # RO preparation (assign res_combinations) ########################################### RO_lms = np.unique([self.find_instrument(q).instr_LutMan_RO() for q in all_qubits]) - qubit_RO_lm = { self.find_instrument(q).cfg_qubit_nr() : - (self.find_instrument(q).name, + qubit_RO_lm = { self.find_instrument(q).cfg_qubit_nr() : + (self.find_instrument(q).name, self.find_instrument(q).instr_LutMan_RO()) for q in all_qubits } main_qubits = [] exception_qubits = [] @@ -1857,8 +1858,8 @@ def measure_msmt_induced_dephasing( assert echo_phases != None for i, q in enumerate(target_qubits): mw_lm = self.find_instrument(f'MW_lutman_{q}') - print(mw_lm.name) - mw_lm.LutMap()[30] = {'name': 'rEcho', 'theta': 180, + print(mw_lm.name) + mw_lm.LutMap()[30] = {'name': 'rEcho', 'theta': 180, 'phi': echo_phases[i], 'type': 'ge'} mw_lm.load_phase_pulses_to_AWG_lookuptable() @@ -1883,7 +1884,7 @@ def measure_msmt_induced_dephasing( MC.soft_avg(1) MC.live_plot_enabled(True) MC.set_sweep_function(s) - sw_pts = np.concatenate((np.repeat(np.arange(0, 360, 20), 6), + sw_pts = np.concatenate((np.repeat(np.arange(0, 360, 20), 6), np.array([360, 361, 362, 364]))) MC.set_sweep_points(sw_pts) MC.set_detector_function(d) @@ -2366,7 +2367,7 @@ def restore_pars(): def measure_cryoscope( self, - qubits, + qubits: List[str], times, MC: Optional[MeasurementControl] = None, nested_MC: Optional[MeasurementControl] = None, @@ -2410,14 
+2411,14 @@ def measure_cryoscope( for q in qubits: assert q in self.qubits() - + Q_idxs = [self.find_instrument(q).cfg_qubit_nr() for q in qubits] if prepare_for_timedomain: self.prepare_for_timedomain(qubits=qubits) if max_delay is None: - max_delay = 0 + max_delay = 0 else: max_delay = np.max(times) + 40e-9 @@ -2431,7 +2432,7 @@ def measure_cryoscope( flux_cw = "fl_cw_06" elif waveform_name == "custom_wf": - Sw_functions = [swf.FLsweep(lutman, lutman.custom_wf_length, + Sw_functions = [swf.FLsweep(lutman, lutman.custom_wf_length, waveform_name="custom_wf") for lutman in Fl_lutmans] swfs = swf.multi_sweep_function(Sw_functions) flux_cw = "fl_cw_05" @@ -2585,7 +2586,7 @@ def measure_cryoscope_vs_amp( def measure_timing_diagram( self, - qubits: list, + qubits: List[str], flux_latencies, microwave_latencies, MC: Optional[MeasurementControl] = None, @@ -2666,30 +2667,25 @@ def measure_timing_diagram( def measure_two_qubit_randomized_benchmarking( self, - qubits, - nr_cliffords=np.array( - [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0, 12.0, 15.0, 20.0, 25.0, 30.0, 50.0] - ), + qubits: List[str], + nr_cliffords=np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0, 12.0, 15.0, 20.0, 25.0, 30.0, 50.0]), nr_seeds=100, interleaving_cliffords=[None], label="TwoQubit_RB_{}seeds_recompile={}_icl{}_{}_{}_{}", - recompile: bool = "as needed", cal_points=True, flux_codeword="cz", flux_allocated_duration_ns: int = None, sim_cz_qubits: list = None, - compile_only: bool = False, - pool=None, # a multiprocessing.Pool() - rb_tasks=None, # used after called with `compile_only=True` - MC=None + sim_single_qubits: list = None, + + MC: Optional[MeasurementControl] = None, + recompile: bool = "as needed", + parallel: bool = False ): """ Measures two qubit randomized benchmarking, including the leakage estimate. 
- [2020-07-04 Victor] this method was updated to allow for parallel - compilation using all the cores of the measurement computer - Refs: Knill PRA 77, 012307 (2008) Wood PRA 97, 032306 (2018) @@ -2713,12 +2709,6 @@ def measure_two_qubit_randomized_benchmarking( label (str): string for formatting the measurement name - recompile (bool, str {'as needed'}): - indicate whether to regenerate the sequences of clifford gates. - By default it checks whether the needed sequences were already - generated since the most recent change of OpenQL file - specified in self.cfg_openql_platform_fn - cal_points (bool): should calibration point (qubits in 0 and 1 states) be included in the measurement @@ -2732,23 +2722,20 @@ def measure_two_qubit_randomized_benchmarking( CZ gates that are intended to be performed in parallel with other CZ gates. + sim_single_qubits: FIXME + flux_allocated_duration_ns (list): Duration in ns of the flux pulse used when interleaved gate is [100_000], i.e. idle identity - compile_only (bool): - Compile only the RB sequences without measuring, intended for - parallelizing iRB sequences compilation with measurements - - pool (multiprocessing.Pool): - Only relevant for `compilation_only=True` - Pool to which the compilation tasks will be assigned + recompile (bool, str {'as needed'}): + indicate whether to regenerate the sequences of clifford gates. + By default it checks whether the needed sequences were already + generated since the most recent change of OpenQL file + specified in self.cfg_openql_platform_fn - rb_tasks (list): - Only relevant when running `compilation_only=True` previously, - saving the rb_tasks, waiting for them to finish then running - this method again and providing the `rb_tasks`. - See the interleaved RB for use case. 
+ parallel: + if True, runs compilation tasks in parallel to increase speed """ if MC is None: MC = self.instr_MC.get_instr() @@ -2757,25 +2744,33 @@ def measure_two_qubit_randomized_benchmarking( # 2-state readout and postprocessing old_weight_type = self.ro_acq_weight_type() old_digitized = self.ro_acq_digitized() - old_avg = self.ro_acq_averages() + old_avg = self.ro_acq_averages() # FIXME: unused self.ro_acq_weight_type("optimal IQ") self.ro_acq_digitized(False) self.prepare_for_timedomain(qubits=qubits) MC.soft_avg(1) # FIXME: changes state - # The detector needs to be defined before setting back parameters + # The detector needs to be defined before restoring parameters d = self.get_int_logging_detector(qubits=qubits) - # set back the settings + # restore parameters self.ro_acq_weight_type(old_weight_type) self.ro_acq_digitized(old_digitized) + # configure LutMans for q in qubits: q_instr = self.find_instrument(q) mw_lutman = q_instr.instr_LutMan_MW.get_instr() mw_lutman.load_ef_rabi_pulses_to_AWG_lookuptable() + if sim_single_qubits: + for q in sim_single_qubits: + q_instr = self.find_instrument(q) + mw_lutman = q_instr.instr_LutMan_MW.get_instr() + mw_lutman.set_default_lutmap() + mw_lutman.load_waveforms_onto_AWG_lookuptable() - MC.soft_avg(1) + MC.soft_avg(1) # FIXME: again? 
+ # determine sim_cz_qubits_idxs FIXME: unused qubit_idxs = [self.find_instrument(q).cfg_qubit_nr() for q in qubits] if sim_cz_qubits is not None: sim_cz_qubits_idxs = [ @@ -2784,56 +2779,44 @@ def measure_two_qubit_randomized_benchmarking( else: sim_cz_qubits_idxs = None - net_cliffords = [0, 3 * 24 + 3] # see two_qubit_clifford_group::common_cliffords - - def send_rb_tasks(pool_): - tasks_inputs = [] - for i in range(nr_seeds): - task_dict = dict( - qubits=qubit_idxs, - nr_cliffords=nr_cliffords, - nr_seeds=1, - flux_codeword=flux_codeword, - flux_allocated_duration_ns=flux_allocated_duration_ns, - platf_cfg=self.cfg_openql_platform_fn(), - program_name="TwoQ_RB_int_cl_s{}_ncl{}_icl{}_{}_{}".format( - int(i), - list(map(int, nr_cliffords)), - interleaving_cliffords, - qubits[0], - qubits[1], - ), - interleaving_cliffords=interleaving_cliffords, - cal_points=cal_points, - net_cliffords=net_cliffords, # measures with and without inverting - f_state_cal_pts=True, - recompile=recompile, - sim_cz_qubits=sim_cz_qubits_idxs, - ) - tasks_inputs.append(task_dict) - - rb_tasks = pool_.map_async(cl_oql.parallel_friendly_rb, tasks_inputs) - - return rb_tasks + # determine sim_single_qubits_idxs + if sim_single_qubits is not None: + sim_single_qubits_idxs = [ + self.find_instrument(q).cfg_qubit_nr() for q in sim_single_qubits + ] + else: + sim_single_qubits_idxs = None - if compile_only: - assert pool is not None # FIXME: add proper message - rb_tasks = send_rb_tasks(pool) - return rb_tasks + net_cliffords = [0, 3 * 24 + 3] # see two_qubit_clifford_group::common_cliffords - if rb_tasks is None: - # Using `with ...:` makes sure the other processes will be terminated - # avoid starting too mane processes, - # nr_processes = None will start as many as the PC can handle - nr_processes = None if recompile else 1 - with multiprocessing.Pool( - nr_processes, - maxtasksperchild=cl_oql.maxtasksperchild # avoid RAM issues - ) as pool: - rb_tasks = send_rb_tasks(pool) - 
cl_oql.wait_for_rb_tasks(rb_tasks) + # define work to do + tasks_inputs = [] + for i in range(nr_seeds): + task_dict = dict( + platf_cfg=self.cfg_openql_platform_fn(), + two_qubit_pair=qubit_idxs, + single_qubits=sim_single_qubits_idxs, + nr_cliffords=nr_cliffords, + nr_seeds=1, + flux_codeword=flux_codeword, + flux_allocated_duration_ns=flux_allocated_duration_ns, + program_name="TwoQ_RB_int_cl_s{}_ncl{}_icl{}_{}_{}".format( + int(i), + list(map(int, nr_cliffords)), + interleaving_cliffords, + qubits[0], + qubits[1], + ), + interleaving_cliffords=interleaving_cliffords, + cal_points=cal_points, + two_qubit_net_cliffords=net_cliffords, + single_qubit_net_cliffords=net_cliffords, + f_state_cal_pts=True, + recompile=recompile + ) + tasks_inputs.append(task_dict) - programs_filenames = rb_tasks.get() + programs = run_tasks(cl_oql.two_qubit_randomized_benchmarking, tasks_inputs, parallel) # to include calibration points if cal_points: @@ -2846,20 +2829,22 @@ def send_rb_tasks(pool_): else: sweep_points = np.repeat(nr_cliffords, 2) + # set kwargs for set_prepare_function() below counter_param = ManualParameter("name_ctr", initial_value=0) prepare_function_kwargs = { "counter_param": counter_param, - "programs_filenames": programs_filenames, + "programs": programs, + # "programs_filenames": programs_filenames, "CC": self.instr_CC.get_instr(), } # Using the first detector of the multi-detector as this is # in charge of controlling the CC (see self.get_int_logging_detector) d.set_prepare_function( - oqh.load_range_of_oql_programs_from_filenames, + oqh.load_range_of_oql_programs, prepare_function_kwargs, detectors="first" ) - # d.nr_averages = 128 + # d.nr_averages = 128 FIXME: commented out reps_per_seed = 4094 // len(sweep_points) nr_shots = reps_per_seed * len(sweep_points) @@ -2878,245 +2863,12 @@ def send_rb_tasks(pool_): qubits[0], qubits[1], flux_codeword) + if sim_single_qubits: + label += f'_sim_{sim_single_qubits}' MC.run(label, exp_metadata={"bins": 
sweep_points}) - # N.B. if interleaving cliffords are used, this won't work + # FIXME: if interleaving cliffords are used, this won't work ma2.RandomizedBenchmarking_TwoQubit_Analysis(label=label) - # FIXME: Under testing by Jorge - # def measure_two_qubit_randomized_benchmarking( - # self, - # qubits, - # nr_cliffords=np.array( - # [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0, 12.0, 15.0, 20.0, 25.0, 30.0, 50.0] - # ), - # nr_seeds=100, - # interleaving_cliffords=[None], - # label="TwoQubit_RB_{}seeds_recompile={}_icl{}_{}_{}_{}", - # recompile: bool = "as needed", - # cal_points=True, - # flux_codeword="cz", - # flux_allocated_duration_ns: int = None, - # sim_cz_qubits: list = None, - # sim_single_qubits: list = None, - # compile_only: bool = False, - # pool=None, # a multiprocessing.Pool() - # rb_tasks=None, # used after called with `compile_only=True` - # MC=None - # ): - # """ - # Measures two qubit randomized benchmarking, including - # the leakage estimate. - - # [2020-07-04 Victor] this method was updated to allow for parallel - # compilation using all the cores of the measurement computer - - # Refs: - # Knill PRA 77, 012307 (2008) - # Wood PRA 97, 032306 (2018) - - # Args: - # qubits (list): - # pair of the qubit names on which to perform RB - - # nr_cliffords (array): - # lengths of the clifford sequences to perform - - # nr_seeds (int): - # number of different clifford sequences of each length - - # interleaving_cliffords (list): - # list of integers (or None) which specifies which cliffords - # to interleave the sequence with (for interleaved RB) - # For indices of Clifford group elements go to - # two_qubit_clifford_group.py - - # label (str): - # string for formatting the measurement name - - # recompile (bool, str {'as needed'}): - # indicate whether to regenerate the sequences of clifford gates. 
- # By default it checks whether the needed sequences were already - # generated since the most recent change of OpenQL file - # specified in self.cfg_openql_platform_fn - - # cal_points (bool): - # should calibration point (qubits in 0 and 1 states) - # be included in the measurement - - # flux_codeword (str): - # flux codeword corresponding to the Cphase gate - # sim_cz_qubits (list): - # A list of qubit names on which a simultaneous cz - # instruction must be applied. This is for characterizing - # CZ gates that are intended to be performed in parallel - # with other CZ gates. - # flux_allocated_duration_ns (list): - # Duration in ns of the flux pulse used when interleaved gate is - # [100_000], i.e. idle identity - # compilation_only (bool): - # Compile only the RB sequences without measuring, intended for - # parallelizing iRB sequences compilation with measurements - # pool (multiprocessing.Pool): - # Only relevant for `compilation_only=True` - # Pool to which the compilation tasks will be assigned - # rb_tasks (list): - # Only relevant when running `compilation_only=True` previously, - # saving the rb_tasks, waiting for them to finish then running - # this method again and providing the `rb_tasks`. - # See the interleaved RB for use case. 
- # """ - # if MC is None: - # MC = self.instr_MC.get_instr() - - # # Settings that have to be preserved, change is required for - # # 2-state readout and postprocessing - # old_weight_type = self.ro_acq_weight_type() - # old_digitized = self.ro_acq_digitized() - # old_avg = self.ro_acq_averages() - # self.ro_acq_weight_type("optimal IQ") - # self.ro_acq_digitized(False) - - # self.prepare_for_timedomain(qubits=qubits) - # MC.soft_avg(1) - # # The detector needs to be defined before setting back parameters - # d = self.get_int_logging_detector(qubits=qubits) - # # set back the settings - # self.ro_acq_weight_type(old_weight_type) - # self.ro_acq_digitized(old_digitized) - - # for q in qubits: - # q_instr = self.find_instrument(q) - # mw_lutman = q_instr.instr_LutMan_MW.get_instr() - # mw_lutman.load_ef_rabi_pulses_to_AWG_lookuptable() - # if sim_single_qubits: - # for q in sim_single_qubits: - # q_instr = self.find_instrument(q) - # mw_lutman = q_instr.instr_LutMan_MW.get_instr() - # mw_lutman.set_default_lutmap() - # mw_lutman.load_waveforms_onto_AWG_lookuptable() - - - # MC.soft_avg(1) - - # qubit_idxs = [self.find_instrument(q).cfg_qubit_nr() for q in qubits] - # if sim_cz_qubits is not None: - # sim_cz_qubits_idxs = [ - # self.find_instrument(q).cfg_qubit_nr() for q in sim_cz_qubits - # ] - # else: - # sim_cz_qubits_idxs = None - - # if sim_single_qubits is not None: - # sim_single_qubits_idxs = [ - # self.find_instrument(q).cfg_qubit_nr() for q in sim_single_qubits - # ] - # else: - # sim_single_qubits_idxs = None - - # net_cliffords = [0, 3 * 24 + 3] - - # programs = [] - - # print('Generating {} RB programs'.format(nr_seeds)) - # t0 = time.time() - # for i in range(nr_seeds): - # check_keyboard_interrupt() - # # p = cl_oql.randomized_benchmarking( - # # qubits=qubit_idxs, - # # nr_cliffords=nr_cliffords, - # # nr_seeds=1, - # # flux_codeword=flux_codeword, - # # flux_allocated_duration_ns=flux_allocated_duration_ns, - # # 
platf_cfg=self.cfg_openql_platform_fn(), - # # program_name="TwoQ_RB_int_cl_s{}_ncl{}_icl{}_{}_{}".format( - # # int(i), - # # list(map(int, nr_cliffords)), - # # interleaving_cliffords, - # # qubits[0], - # # qubits[1], - # # ), - # # interleaving_cliffords=interleaving_cliffords, - # # cal_points=cal_points, - # # net_cliffords=net_cliffords, # measures with and without inverting - # # f_state_cal_pts=True, - # # recompile=recompile, - # # sim_cz_qubits=sim_cz_qubits_idxs, - # # ) - # p = cl_oql.two_qubit_randomized_benchmarking( - # two_qubit_pair= qubit_idxs, - # single_qubits=sim_single_qubits_idxs, - # nr_cliffords=nr_cliffords, - # nr_seeds= 1, - # flux_codeword=flux_codeword, - # flux_allocated_duration_ns=flux_allocated_duration_ns, - # platf_cfg=self.cfg_openql_platform_fn(), - # program_name="TwoQ_RB_int_cl_s{}_ncl{}_icl{}_{}_{}".format( - # int(i), - # list(map(int, nr_cliffords)), - # interleaving_cliffords, - # qubits[0], - # qubits[1], - # ), - # interleaving_cliffords=interleaving_cliffords, - # cal_points=cal_points, - # two_qubit_net_cliffords=net_cliffords, - # single_qubit_net_cliffords=net_cliffords, - # f_state_cal_pts=True, - # recompile=recompile - # ) - # print(f'compiled_program {i+1}') - # programs.append(p) - - - # # to include calibration points - # if cal_points: - # sweep_points = np.append( - # np.repeat(nr_cliffords, 2), - # [nr_cliffords[-1] + 0.5] * 2 - # + [nr_cliffords[-1] + 1.5] * 2 - # + [nr_cliffords[-1] + 2.5] * 3, - # ) - # else: - # sweep_points = np.repeat(nr_cliffords, 2) - - # counter_param = ManualParameter("name_ctr", initial_value=0) - # prepare_function_kwargs = { - # "counter_param": counter_param, - # "programs": programs, - # "CC": self.instr_CC.get_instr(), - # } - - # # Using the first detector of the multi-detector as this is - # # in charge of controlling the CC (see self.get_int_logging_detector) - # d.set_prepare_function( - # oqh.load_range_of_oql_programs, - # prepare_function_kwargs, detectors="first" - 
# ) - # # d.nr_averages = 128 - - # reps_per_seed = 4094 // len(sweep_points) - # nr_shots = reps_per_seed * len(sweep_points) - # d.set_child_attr("nr_shots", nr_shots) - - # s = swf.None_Sweep(parameter_name="Number of Cliffords", unit="#") - - # MC.set_sweep_function(s) - # MC.set_sweep_points(np.tile(sweep_points, reps_per_seed * nr_seeds)) - - # MC.set_detector_function(d) - # label = label.format( - # nr_seeds, - # recompile, - # interleaving_cliffords, - # qubits[0], - # qubits[1], - # flux_codeword) - # if sim_single_qubits: - # label += f'_sim_{sim_single_qubits}' - # MC.run(label, exp_metadata={"bins": sweep_points}) - # # N.B. if interleaving cliffords are used, this won't work - # ma2.RandomizedBenchmarking_TwoQubit_Analysis(label=label) - def measure_interleaved_randomized_benchmarking_statistics( self, RB_type: str = "CZ", @@ -3138,8 +2890,9 @@ def measure_interleaved_randomized_benchmarking_statistics( if RB_type == "CZ": measurement_func = self.measure_two_qubit_interleaved_randomized_benchmarking - elif RB_type == "CZ_parked_qubit": - measurement_func = self.measure_single_qubit_interleaved_randomized_benchmarking_parking + # NB: measure_single_qubit_interleaved_randomized_benchmarking_parking was removed around commit b2e571fb4c5d436c69a98c9b710e3bb3018ece14 + # elif RB_type == "CZ_parked_qubit": + # measurement_func = self.measure_single_qubit_interleaved_randomized_benchmarking_parking else: raise ValueError( "RB type `{}` not recognized!".format(RB_type) @@ -3147,6 +2900,7 @@ def measure_interleaved_randomized_benchmarking_statistics( rounds_success = np.zeros(nr_iRB_runs) t0 = time.time() + # `maxtasksperchild` avoid RAM issues with multiprocessing.Pool(maxtasksperchild=cl_oql.maxtasksperchild) as pool: rb_tasks_start = None @@ -3157,9 +2911,7 @@ def measure_interleaved_randomized_benchmarking_statistics( iRB_kw["start_next_round_compilation"] = (i < last_run) round_successful = False try: - rb_tasks_start = measurement_func( - **iRB_kw - ) + 
rb_tasks_start = measurement_func(**iRB_kw) round_successful = True except Exception: print_exception() @@ -3176,22 +2928,18 @@ def measure_interleaved_randomized_benchmarking_statistics( def measure_two_qubit_interleaved_randomized_benchmarking( self, - qubits: list, - nr_cliffords=np.array( - [1., 3., 5., 7., 9., 11., 15., 20., 25., 30., 40., 50., 70., 90., 120.] - ), + qubits: List[str], + nr_cliffords=np.array([1., 3., 5., 7., 9., 11., 15., 20., 25., 30., 40., 50., 70., 90., 120.]), nr_seeds=100, - recompile: bool = "as needed", flux_codeword="cz", flux_allocated_duration_ns: int = None, sim_cz_qubits: list = None, measure_idle_flux: bool = True, - rb_tasks_start: list = None, - pool=None, cardinal: dict = None, - start_next_round_compilation: bool = False, - maxtasksperchild=None, - MC = None, + + MC: Optional[MeasurementControl] = None, + recompile: bool = "as needed", + parallel: bool = False ): # USED_BY: inspire_dependency_graph.py, """ @@ -3206,123 +2954,105 @@ def measure_two_qubit_interleaved_randomized_benchmarking( if MC is None: MC = self.instr_MC.get_instr() - def run_parallel_iRB( - recompile, pool, rb_tasks_start: list = None, - start_next_round_compilation: bool = False - ): - """ - We define the full parallel iRB procedure here as function such - that we can control the flow of the parallel RB sequences - compilations from the outside of this method, and allow for - chaining RB compilations for sequential measurements intended for - taking statistics of the RB performance - """ - rb_tasks_next = None + # common kwargs for calls to measure_two_qubit_randomized_benchmarking below + common_kwargs = dict( + qubits=qubits, + MC=MC, + nr_cliffords=nr_cliffords, + flux_codeword=flux_codeword, + nr_seeds=nr_seeds, + sim_cz_qubits=sim_cz_qubits, + # sim_single_qubits=sim_single_qubits, FIXME: Under testing by Jorge + recompile=recompile, + ) + + + if 0: + ################# FIXME: definition of parallel work #################### + + # def 
run_parallel_iRB( + # recompile, + # pool, + # rb_tasks_start: list = None, + # start_next_round_compilation: bool = False + # ): + # """ + # We define the full parallel iRB procedure here as function such + # that we can control the flow of the parallel RB sequences + # compilations from the outside of this method, and allow for + # chaining RB compilations for sequential measurements intended for + # taking statistics of the RB performance + # """ + # rb_tasks_next = None + # + # # 1. Start (non-blocking) compilation for [None] + # # We make it non-blocking such that the non-blocking feature + # # is used for the interleaved cases + # if rb_tasks_start is None: + # rb_tasks_start = self.measure_two_qubit_randomized_benchmarking( + + # define work to do + tasks_inputs = [] # 1. Start (non-blocking) compilation for [None] - # We make it non-blocking such that the non-blocking feature - # is used for the interleaved cases - if rb_tasks_start is None: - rb_tasks_start = self.measure_two_qubit_randomized_benchmarking( - qubits=qubits, - MC=MC, - nr_cliffords=nr_cliffords, - interleaving_cliffords=[None], - recompile=recompile, - flux_codeword=flux_codeword, - nr_seeds=nr_seeds, - sim_cz_qubits=sim_cz_qubits, - # FIXME: Under testing by Jorge - # sim_single_qubits=sim_single_qubits, - compile_only=True, - pool=pool - ) + task_dict = dict( + interleaving_cliffords=[None], + compile_only=True, + ) + tasks_inputs.append({**common_kwargs, **task_dict}) - # 2. Wait for [None] compilation to finish - cl_oql.wait_for_rb_tasks(rb_tasks_start) + # # 2. Wait for [None] compilation to finish + # cl_oql.wait_for_rb_tasks(rb_tasks_start) # 3. 
Start (non-blocking) compilation for [104368] - rb_tasks_CZ = self.measure_two_qubit_randomized_benchmarking( - qubits=qubits, - MC=MC, - nr_cliffords=nr_cliffords, + # rb_tasks_CZ = self.measure_two_qubit_randomized_benchmarking( + task_dict=dict( interleaving_cliffords=[104368], - recompile=recompile, - flux_codeword=flux_codeword, - nr_seeds=nr_seeds, - sim_cz_qubits=sim_cz_qubits, - # FIXME: Under testing by Jorge - # sim_single_qubits=sim_single_qubits, compile_only=True, - pool=pool ) + tasks_inputs.append({**common_kwargs, **task_dict}) + # 4. Start the measurement and run the analysis for [None] - self.measure_two_qubit_randomized_benchmarking( - qubits=qubits, - MC=MC, - nr_cliffords=nr_cliffords, + # self.measure_two_qubit_randomized_benchmarking( + task_dict = dict( interleaving_cliffords=[None], - recompile=False, # This of course needs to be False - flux_codeword=flux_codeword, - nr_seeds=nr_seeds, - sim_cz_qubits=sim_cz_qubits, - # FIXME: Under testing by Jorge - # sim_single_qubits=sim_single_qubits, - rb_tasks=rb_tasks_start, + # rb_tasks=rb_tasks_start, FIXME ) + tasks_inputs.append({**common_kwargs, **task_dict}) - # 5. Wait for [104368] compilation to finish - cl_oql.wait_for_rb_tasks(rb_tasks_CZ) + # # 5. Wait for [104368] compilation to finish + # cl_oql.wait_for_rb_tasks(rb_tasks_CZ) # 6. 
Start (non-blocking) compilation for [100_000] if measure_idle_flux: - rb_tasks_I = self.measure_two_qubit_randomized_benchmarking( - qubits=qubits, - MC=MC, - nr_cliffords=nr_cliffords, + # rb_tasks_I = self.measure_two_qubit_randomized_benchmarking( + task_dict=dict( interleaving_cliffords=[100_000], - recompile=recompile, - flux_codeword=flux_codeword, flux_allocated_duration_ns=flux_allocated_duration_ns, - nr_seeds=nr_seeds, - sim_cz_qubits=sim_cz_qubits, - # FIXME: Under testing by Jorge - # sim_single_qubits=sim_single_qubits, compile_only=True, - pool=pool, ) + tasks_inputs.append({**common_kwargs, **task_dict}) + + elif start_next_round_compilation: # Optionally send to the `pool` the tasks of RB compilation to be # used on the next round of calling the iRB method - rb_tasks_next = self.measure_two_qubit_randomized_benchmarking( - qubits=qubits, - MC=MC, - nr_cliffords=nr_cliffords, + # rb_tasks_next = self.measure_two_qubit_randomized_benchmarking( + task_dict = dict( interleaving_cliffords=[None], - recompile=recompile, - flux_codeword=flux_codeword, - nr_seeds=nr_seeds, - sim_cz_qubits=sim_cz_qubits, - # FIXME: Under testing by Jorge - # sim_single_qubits=sim_single_qubits, compile_only=True, - pool=pool ) + tasks_inputs.append({**common_kwargs, **task_dict}) + + # 7. Start the measurement and run the analysis for [104368] - self.measure_two_qubit_randomized_benchmarking( - qubits=qubits, - MC=MC, - nr_cliffords=nr_cliffords, + # self.measure_two_qubit_randomized_benchmarking( + task_dict = dict( interleaving_cliffords=[104368], - recompile=False, - flux_codeword=flux_codeword, - nr_seeds=nr_seeds, - sim_cz_qubits=sim_cz_qubits, - # FIXME: Under testing by Jorge - # sim_single_qubits=sim_single_qubits, rb_tasks=rb_tasks_CZ, ) + tasks_inputs.append({**common_kwargs, **task_dict}) ma2.InterleavedRandomizedBenchmarkingAnalysis( label_base="icl[None]", label_int="icl[104368]" @@ -3330,148 +3060,118 @@ def run_parallel_iRB( if measure_idle_flux: # 8. 
Wait for [100_000] compilation to finish - cl_oql.wait_for_rb_tasks(rb_tasks_I) + # cl_oql.wait_for_rb_tasks(rb_tasks_I) # 8.a. Optionally send to the `pool` the tasks of RB compilation to be # used on the next round of calling the iRB method if start_next_round_compilation: - rb_tasks_next = self.measure_two_qubit_randomized_benchmarking( - qubits=qubits, - MC=MC, - nr_cliffords=nr_cliffords, + # rb_tasks_next = self.measure_two_qubit_randomized_benchmarking( + task_dict = dict( interleaving_cliffords=[None], - recompile=recompile, - flux_codeword=flux_codeword, - nr_seeds=nr_seeds, - sim_cz_qubits=sim_cz_qubits, - # FIXME: Under testing by Jorge - # sim_single_qubits=sim_single_qubits, - compile_only=True, - pool=pool ) + tasks_inputs.append({**common_kwargs, **task_dict}) # 9. Start the measurement and run the analysis for [100_000] - self.measure_two_qubit_randomized_benchmarking( - qubits=qubits, - MC=MC, - nr_cliffords=nr_cliffords, + # self.measure_two_qubit_randomized_benchmarking( + task_dict = dict( interleaving_cliffords=[100_000], - recompile=False, - flux_codeword=flux_codeword, flux_allocated_duration_ns=flux_allocated_duration_ns, - nr_seeds=nr_seeds, - sim_cz_qubits=sim_cz_qubits, - # FIXME: Under testing by Jorge - # sim_single_qubits=sim_single_qubits, rb_tasks=rb_tasks_I ) + tasks_inputs.append({**common_kwargs, **task_dict}) ma2.InterleavedRandomizedBenchmarkingAnalysis( label_base="icl[None]", label_int="icl[104368]", label_int_idle="icl[100000]" ) - return rb_tasks_next - - if recompile or recompile == "as needed": - # This is an optimization that compiles the interleaved RB - # sequences for the next measurement while measuring the previous - # one - if pool is None: - # Using `with ...:` makes sure the other processes will be terminated - # `maxtasksperchild` avoid RAM issues - if not maxtasksperchild: - maxtasksperchild = cl_oql.maxtasksperchild - with multiprocessing.Pool(maxtasksperchild=maxtasksperchild) as pool: - 
run_parallel_iRB(recompile=recompile, - pool=pool, - rb_tasks_start=rb_tasks_start) - else: - # In this case the `pool` to execute the RB compilation tasks - # is provided, `rb_tasks_start` is expected to be as well - rb_tasks_next = run_parallel_iRB( - recompile=recompile, - pool=pool, - rb_tasks_start=rb_tasks_start, - start_next_round_compilation=start_next_round_compilation) - return rb_tasks_next + # return rb_tasks_next + + + ################# FIXME: end of definition of parallel work #################### + + +# if recompile or recompile == "as needed": +# # This is an optimization that compiles the interleaved RB +# # sequences for the next measurement while measuring the previous +# # one +# if pool is None: +# # `maxtasksperchild` avoid RAM issues +# if not maxtasksperchild: +# maxtasksperchild = cl_oql.maxtasksperchild +# # <<<<<<< HEAD +# +# # Using `with ...:` makes sure the other processes will be terminated +# with multiprocessing.Pool(maxtasksperchild=maxtasksperchild) as pool: +# run_parallel_iRB( +# recompile=recompile, +# pool=pool, +# rb_tasks_start=rb_tasks_start +# ) +# else: +# # In this case the `pool` to execute the RB compilation tasks +# # is provided, `rb_tasks_start` is expected to be as well +# rb_tasks_next = run_parallel_iRB( +# recompile=recompile, +# pool=pool, +# rb_tasks_start=rb_tasks_start, +# start_next_round_compilation=start_next_round_compilation +# ) +# return rb_tasks_next else: - # recompile=False no need to parallelize compilation with measurement + # sequential code version + # Perform two-qubit RB (no interleaved gate) self.measure_two_qubit_randomized_benchmarking( - qubits=qubits, - MC=MC, - nr_cliffords=nr_cliffords, - interleaving_cliffords=[None], - recompile=recompile, - flux_codeword=flux_codeword, - nr_seeds=nr_seeds, - sim_cz_qubits=sim_cz_qubits, - # FIXME: Under testing by Jorge - # sim_single_qubits=sim_single_qubits, + **common_kwargs, + interleaving_cliffords=[None] ) # Perform two-qubit RB with CZ 
interleaved self.measure_two_qubit_randomized_benchmarking( - qubits=qubits, - MC=MC, - nr_cliffords=nr_cliffords, - interleaving_cliffords=[104368], - recompile=recompile, - flux_codeword=flux_codeword, - nr_seeds=nr_seeds, - sim_cz_qubits=sim_cz_qubits, - # FIXME: Under testing by Jorge - # sim_single_qubits=sim_single_qubits, + **common_kwargs, + interleaving_cliffords=[104368] ) a = ma2.InterleavedRandomizedBenchmarkingAnalysis( label_base="icl[None]", - label_int="icl[104368]", + label_int="icl[104368]" ) + if cardinal: opposite_cardinal = {'NW':'SE', 'NE':'SW', 'SW':'NE', 'SE':'NW'} - self.find_instrument(qubits[0]).parameters[f'F_2QRB_{cardinal}'].set(1-a.proc_data_dict['quantities_of_interest']['eps_CZ_simple'].n) - self.find_instrument(qubits[1]).parameters[f'F_2QRB_{opposite_cardinal[cardinal]}'].set(1-a.proc_data_dict['quantities_of_interest']['eps_CZ_simple'].n) + val = 1-a.proc_data_dict['quantities_of_interest']['eps_CZ_simple'].n + self.find_instrument(qubits[0]).parameters[f'F_2QRB_{cardinal}'].set(val) + self.find_instrument(qubits[1]).parameters[f'F_2QRB_{opposite_cardinal[cardinal]}'].set(val) if measure_idle_flux: # Perform two-qubit iRB with idle identity of same duration as CZ self.measure_two_qubit_randomized_benchmarking( - qubits=qubits, - MC=MC, - nr_cliffords=nr_cliffords, + **common_kwargs, interleaving_cliffords=[100_000], - recompile=recompile, - flux_codeword=flux_codeword, - flux_allocated_duration_ns=flux_allocated_duration_ns, - nr_seeds=nr_seeds, - sim_cz_qubits=sim_cz_qubits, - # FIXME: Under testing by Jorge - # sim_single_qubits=sim_single_qubits, + flux_allocated_duration_ns=flux_allocated_duration_ns ) + ma2.InterleavedRandomizedBenchmarkingAnalysis( label_base="icl[None]", label_int="icl[104368]", label_int_idle="icl[100000]" - ) return True def measure_two_qubit_purity_benchmarking( self, - qubits, + qubits: List[str], MC, - nr_cliffords=np.array( - [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0, 12.0, 15.0, 20.0, 25.0] - ), + 
nr_cliffords=np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0, 12.0, 15.0, 20.0, 25.0]), nr_seeds=100, interleaving_cliffords=[None], label="TwoQubit_purityB_{}seeds_{}_{}", - recompile: bool = "as needed", cal_points: bool = True, flux_codeword: str = "cz", - ): + recompile: bool = "as needed" + ): """ Measures two qubit purity (aka unitarity) benchmarking. It is a modified RB routine which measures the length of @@ -3503,15 +3203,15 @@ def measure_two_qubit_purity_benchmarking( label (str): string for formatting the measurement name + cal_points (bool): + should calibration point (qubits in 0 and 1 states) + be included in the measurement + recompile (bool, str {'as needed'}): indicate whether to regenerate the sequences of clifford gates. By default it checks whether the needed sequences were already generated since the most recent change of OpenQL file specified in self.cfg_openql_platform_fn - - cal_points (bool): - should calibration point (qubits in 0 and 1 states) - be included in the measurement """ # Settings that have to be preserved, change is required for @@ -3528,7 +3228,7 @@ def measure_two_qubit_purity_benchmarking( d = self.get_int_logging_detector(qubits=qubits) MC.soft_avg(1) - # set back the settings + # restore settings self.ro_acq_weight_type(old_weight_type) self.ro_acq_digitized(old_digitized) @@ -3640,7 +3340,7 @@ def measure_two_qubit_purity_benchmarking( def measure_two_qubit_character_benchmarking( self, - qubits, + qubits: List[str], MC, nr_cliffords=np.array( [ @@ -3666,8 +3366,8 @@ def measure_two_qubit_character_benchmarking( interleaving_cliffords=[None, -4368], label="TwoQubit_CharBench_{}seeds_icl{}_{}_{}", flux_codeword="fl_cw_01", - recompile: bool = "as needed", ch_idxs=np.array([1, 2]), + recompile: bool = "as needed" ): # Refs: # Helsen arXiv:1806.02048v1 @@ -3683,7 +3383,7 @@ def measure_two_qubit_character_benchmarking( self.prepare_for_timedomain(qubits=qubits) MC.soft_avg(1) - # set back the settings + # restore settings 
d = self.get_int_logging_detector(qubits=qubits) self.ro_acq_weight_type(old_weight_type) self.ro_acq_digitized(old_digitized) @@ -3774,18 +3474,20 @@ def measure_two_qubit_character_benchmarking( def measure_two_qubit_simultaneous_randomized_benchmarking( self, - qubits, + qubits: List[str], MC: Optional[MeasurementControl] = None, nr_cliffords=2 ** np.arange(11), nr_seeds=100, interleaving_cliffords=[None], label="TwoQubit_sim_RB_{}seeds_recompile={}_{}_{}", - recompile: bool = "as needed", cal_points: bool = True, ro_acq_weight_type: str = "optimal IQ", - compile_only: bool = False, - pool=None, # a multiprocessing.Pool() - rb_tasks=None # used after called with `compile_only=True` + + recompile: bool = "as needed", + parallel: bool = False + # compile_only: bool = False, + # pool=None, # a multiprocessing.Pool() + # rb_tasks=None # used after called with `compile_only=True` ): """ Performs simultaneous single qubit RB on two qubits. @@ -3811,15 +3513,15 @@ def measure_two_qubit_simultaneous_randomized_benchmarking( label (str): string for formatting the measurement name + cal_points (bool): + should calibration point (qubits in 0, 1 and 2 states) + be included in the measurement + recompile (bool, str {'as needed'}): indicate whether to regenerate the sequences of clifford gates. 
By default it checks whether the needed sequences were already generated since the most recent change of OpenQL file specified in self.cfg_openql_platform_fn - - cal_points (bool): - should calibration point (qubits in 0, 1 and 2 states) - be included in the measurement """ # Settings that have to be preserved, change is required for @@ -3830,13 +3532,14 @@ def measure_two_qubit_simultaneous_randomized_benchmarking( self.ro_acq_digitized(False) self.prepare_for_timedomain(qubits=qubits) + if MC is None: MC = self.instr_MC.get_instr() MC.soft_avg(1) # The detector needs to be defined before setting back parameters d = self.get_int_logging_detector(qubits=qubits) - # set back the settings + # restore settings self.ro_acq_weight_type(old_weight_type) self.ro_acq_digitized(old_digitized) @@ -3845,55 +3548,33 @@ def measure_two_qubit_simultaneous_randomized_benchmarking( mw_lutman = q_instr.instr_LutMan_MW.get_instr() mw_lutman.load_ef_rabi_pulses_to_AWG_lookuptable() - MC.soft_avg(1) + MC.soft_avg(1) # FIXME: again - def send_rb_tasks(pool_): - tasks_inputs = [] - for i in range(nr_seeds): - task_dict = dict( - qubits=[self.find_instrument(q).cfg_qubit_nr() for q in qubits], - nr_cliffords=nr_cliffords, - nr_seeds=1, - platf_cfg=self.cfg_openql_platform_fn(), - program_name="TwoQ_Sim_RB_int_cl{}_s{}_ncl{}_{}_{}_double".format( - i, - list(map(int, nr_cliffords)), - interleaving_cliffords, - qubits[0], - qubits[1], - ), - interleaving_cliffords=interleaving_cliffords, - simultaneous_single_qubit_RB=True, - cal_points=cal_points, - net_cliffords=[0, 3], # measures with and without inverting - f_state_cal_pts=True, - recompile=recompile, - ) - tasks_inputs.append(task_dict) - # pool.starmap_async can be used for positional arguments - # but we are using a wrapper - rb_tasks = pool_.map_async(cl_oql.parallel_friendly_rb, tasks_inputs) - - return rb_tasks - - if compile_only: - assert pool is not None - rb_tasks = send_rb_tasks(pool) - return rb_tasks - - if rb_tasks is 
None: - # Using `with ...:` makes sure the other processes will be terminated - # avoid starting too mane processes, - # nr_processes = None will start as many as the PC can handle - nr_processes = None if recompile else 1 - with multiprocessing.Pool( - nr_processes, - maxtasksperchild=cl_oql.maxtasksperchild # avoid RAM issues - ) as pool: - rb_tasks = send_rb_tasks(pool) - cl_oql.wait_for_rb_tasks(rb_tasks) - - programs_filenames = rb_tasks.get() + # define work to do + tasks_inputs = [] + for i in range(nr_seeds): + task_dict = dict( + platf_cfg=self.cfg_openql_platform_fn(), + qubits=[self.find_instrument(q).cfg_qubit_nr() for q in qubits], + nr_cliffords=nr_cliffords, + nr_seeds=1, + program_name="TwoQ_Sim_RB_int_cl{}_s{}_ncl{}_{}_{}_double".format( + i, + list(map(int, nr_cliffords)), + interleaving_cliffords, + qubits[0], + qubits[1], + ), + interleaving_cliffords=interleaving_cliffords, + simultaneous_single_qubit_RB=True, + cal_points=cal_points, + net_cliffords=[0, 3], # measures with and without inverting + f_state_cal_pts=True, + recompile=recompile, + ) + tasks_inputs.append(task_dict) + + programs = run_tasks(cl_oql.randomized_benchmarking, tasks_inputs, parallel) # to include calibration points if cal_points: @@ -3909,14 +3590,14 @@ def send_rb_tasks(pool_): counter_param = ManualParameter("name_ctr", initial_value=0) prepare_function_kwargs = { "counter_param": counter_param, - "programs_filenames": programs_filenames, + "programs": programs, "CC": self.instr_CC.get_instr(), } # Using the first detector of the multi-detector as this is # in charge of controlling the CC (see self.get_int_logging_detector) d.set_prepare_function( - oqh.load_range_of_oql_programs_from_filenames, + oqh.load_range_of_oql_programs, prepare_function_kwargs, detectors="first" ) # d.nr_averages = 128 @@ -3958,18 +3639,20 @@ def send_rb_tasks(pool_): def measure_multi_qubit_simultaneous_randomized_benchmarking( self, - qubits, + qubits: List[str], MC: 
Optional[MeasurementControl] = None, nr_cliffords=2 ** np.arange(11), nr_seeds=100, - recompile: bool = "as needed", cal_points: bool = True, ro_acq_weight_type: str = "optimal IQ", - compile_only: bool = False, - pool=None, # a multiprocessing.Pool() - rb_tasks=None, # used after called with `compile_only=True label_name=None, - prepare_for_timedomain=True + prepare_for_timedomain=True, + + recompile: bool = "as needed", + parallel: bool = False + # compile_only: bool = False, + # pool=None, # a multiprocessing.Pool() + # rb_tasks=None # used after called with `compile_only=True ): """ Performs simultaneous single qubit RB on multiple qubits. @@ -3986,15 +3669,15 @@ def measure_multi_qubit_simultaneous_randomized_benchmarking( nr_seeds (int): number of different clifford sequences of each length + cal_points (bool): + should calibration point (qubits in 0, 1 and 2 states) + be included in the measurement + recompile (bool, str {'as needed'}): indicate whether to regenerate the sequences of clifford gates. 
By default it checks whether the needed sequences were already generated since the most recent change of OpenQL file specified in self.cfg_openql_platform_fn - - cal_points (bool): - should calibration point (qubits in 0, 1 and 2 states) - be included in the measurement """ # Settings that have to be preserved, change is required for @@ -4012,7 +3695,7 @@ def measure_multi_qubit_simultaneous_randomized_benchmarking( # The detector needs to be defined before setting back parameters d = self.get_int_logging_detector(qubits=qubits) - # set back the settings + # restore settings self.ro_acq_weight_type(old_weight_type) self.ro_acq_digitized(old_digitized) @@ -4023,50 +3706,29 @@ def measure_multi_qubit_simultaneous_randomized_benchmarking( MC.soft_avg(1) - def send_rb_tasks(pool_): - tasks_inputs = [] - for i in range(nr_seeds): - task_dict = dict( - qubits=[self.find_instrument(q).cfg_qubit_nr() for q in qubits], - nr_cliffords=nr_cliffords, - nr_seeds=1, - platf_cfg=self.cfg_openql_platform_fn(), - program_name="MultiQ_RB_s{}_ncl{}_{}".format( - i, - list(map(int, nr_cliffords)), - '_'.join(qubits) - ), - interleaving_cliffords=[None], - simultaneous_single_qubit_RB=True, - cal_points=cal_points, - net_cliffords=[0, 3], # measures with and without inverting - f_state_cal_pts=True, - recompile=recompile, - ) - tasks_inputs.append(task_dict) - # pool.starmap_async can be used for positional arguments - # but we are using a wrapper - rb_tasks = pool_.map_async(cl_oql.parallel_friendly_rb, tasks_inputs) - return rb_tasks - - if compile_only: - assert pool is not None - rb_tasks = send_rb_tasks(pool) - return rb_tasks - - if rb_tasks is None: - # Using `with ...:` makes sure the other processes will be terminated - # avoid starting too mane processes, - # nr_processes = None will start as many as the PC can handle - nr_processes = None if recompile else 1 - with multiprocessing.Pool( - nr_processes, - maxtasksperchild=cl_oql.maxtasksperchild # avoid RAM issues - ) as 
pool: - rb_tasks = send_rb_tasks(pool) - cl_oql.wait_for_rb_tasks(rb_tasks) - - programs_filenames = rb_tasks.get() + # define work to do + tasks_inputs = [] + for i in range(nr_seeds): + task_dict = dict( + qubits=[self.find_instrument(q).cfg_qubit_nr() for q in qubits], + nr_cliffords=nr_cliffords, + nr_seeds=1, + platf_cfg=self.cfg_openql_platform_fn(), + program_name="MultiQ_RB_s{}_ncl{}_{}".format( + i, + list(map(int, nr_cliffords)), + '_'.join(qubits) + ), + interleaving_cliffords=[None], + simultaneous_single_qubit_RB=True, + cal_points=cal_points, + net_cliffords=[0, 3], # measures with and without inverting + f_state_cal_pts=True, + recompile=recompile, + ) + tasks_inputs.append(task_dict) + + programs = run_tasks(cl_oql.randomized_benchmarking, tasks_inputs, parallel) # to include calibration points if cal_points: @@ -4082,14 +3744,14 @@ def send_rb_tasks(pool_): counter_param = ManualParameter("name_ctr", initial_value=0) prepare_function_kwargs = { "counter_param": counter_param, - "programs_filenames": programs_filenames, + "programs": programs, "CC": self.instr_CC.get_instr(), } # Using the first detector of the multi-detector as this is # in charge of controlling the CC (see self.get_int_logging_detector) d.set_prepare_function( - oqh.load_range_of_oql_programs_from_filenames, + oqh.load_range_of_oql_programs, prepare_function_kwargs, detectors="first" ) # d.nr_averages = 128 @@ -4248,7 +3910,7 @@ def measure_multi_ramsey( def measure_multi_AllXY( self, - qubits: list = None, + qubits: List[str] = None, MC: Optional[MeasurementControl] = None, double_points=True, termination_opt=0.08 @@ -4769,7 +4431,7 @@ def calibrate_optimal_weights_mux( def calibrate_mux_ro( self, - qubits, + qubits: List[str], calibrate_optimal_weights=True, calibrate_threshold=True, # option should be here but is currently not implemented: @@ -5328,13 +4990,13 @@ def calibrate_parity_model_phases( """ Measures parity check as part of a flux dance for `B_sweep_points` different 
SNZ B values for each gate defined in `parity_check`. - Runs parity check model optimization analysis, which fits - a linear dependence of the parity check model phase error given - the measured error of each B value, to determine the B value required + Runs parity check model optimization analysis, which fits + a linear dependence of the parity check model phase error given + the measured error of each B value, to determine the B value required to achieve an error of zero. Args: - parity_check: + parity_check: List of qubits and gate directions which define the flux_lutmans to be used. Parking qubits are not used for this routine, and can be replaced with an empty list. Assumed format: [ [[ancilla_qubit]], [[data_qubit]], [[gate_direction]], [[parking_qubits]] ] @@ -5360,7 +5022,7 @@ def calibrate_parity_model_phases( flux_lm = self.find_instrument(f"flux_lm_{control_qubit[0]}") else: flux_lm = self.find_instrument(f"flux_lm_{target_qubit[0]}") - + old_B = flux_lm.parameters[f"vcz_amp_fine_{gate_direction[0]}"]() sweep_points = a_tools.get_values_around(old_B, range_frac=B_sweep_range_frac, num_points=B_sweep_n_points) @@ -5374,12 +5036,12 @@ def calibrate_parity_model_phases( old_weight_type = self.ro_acq_weight_type() self.ro_acq_digitized(False) self.ro_acq_weight_type('optimal') - + all_qubits = target_qubit + control_qubits_all if prepare_for_timedomain: # Take care of readout order (by feedline/UHF) if self.qubits_by_feedline(): - all_qubits = sorted(all_qubits, + all_qubits = sorted(all_qubits, key=lambda x: [i for i, feedline in enumerate(self.qubits_by_feedline()) \ if x in feedline]) log.info(f"Sorted qubits for readout preparation: {all_qubits}") @@ -5391,10 +5053,10 @@ def calibrate_parity_model_phases( # generate model terms to use for labels controls_qubits_sorted = [qb for qb in all_qubits if qb != target_qubit[0]] - control_combinations = [elem for k in range(1, len(controls_qubits_sorted)+1) + control_combinations = [elem for k in range(1, 
len(controls_qubits_sorted)+1) for elem in itt.combinations(controls_qubits_sorted, k)] model_terms = [target_qubit[0]] - model_terms += [ target_qubit[0] + ',' + qbs + model_terms += [ target_qubit[0] + ',' + qbs for qbs in [','.join(comb) for comb in control_combinations] ] d = det.Function_Detector( @@ -5415,7 +5077,7 @@ def calibrate_parity_model_phases( ) s = swf.FLsweep( - lm=flux_lm, + lm=flux_lm, par=flux_lm.parameters[f"vcz_amp_fine_{gate_direction[0]}"], waveform_name=f"cz_{gate_direction[0]}", upload_waveforms_always=True @@ -5442,7 +5104,7 @@ def calibrate_parity_model_phases( print(repr(e)) print(e.__traceback__) log.error(logging.traceback.format_exc()) - + # reset B! flux_lm.parameters[f"vcz_amp_fine_{gate_direction[0]}"](old_B) @@ -5495,7 +5157,7 @@ def calibrate_snz_fine_landscape( horizontal_calibration == True: Horizontal calibration mode, parity check will be optimized while whole flux dance specified by `flux_codeword` and `flux_dance_steps` is played. - + Args: Raises: @@ -5557,8 +5219,8 @@ def calibrate_snz_fine_landscape( 'extract_only': True, 'disable_metadata': True}, # TODO adapt for nested lists - value_names=[f'cost_function_val_{pair}', - f'delta_phi_{pair}', + value_names=[f'cost_function_val_{pair}', + f'delta_phi_{pair}', f'missing_fraction_{pair}'], result_keys=[f'cost_function_val_{pair}', f'delta_phi_{pair}', @@ -5576,18 +5238,18 @@ def calibrate_snz_fine_landscape( else: flux_lm = self.find_instrument(f"flux_lm_{pair[1]}") - # TODO: bypass waveform upload in sweep functions to save time by avoiding + # TODO: bypass waveform upload in sweep functions to save time by avoiding # repeated upload of the same parameters. # Waveforms can be updated and uploaded only in the detector function - # which should be enough since the detector function is called by the MC + # which should be enough since the detector function is called by the MC # only after new sweep function values are set. 
# But somehow this was not working during an initial test. - sweep_function_1 = swf.FLsweep(lm=flux_lm, + sweep_function_1 = swf.FLsweep(lm=flux_lm, par=flux_lm.parameters[f"vcz_amp_sq_{gate_direction}"], waveform_name=f"cz_{gate_direction}", # bypass_waveform_upload=True, - upload_waveforms_always=True) - sweep_function_2 = swf.FLsweep(lm=flux_lm, + upload_waveforms_always=True) + sweep_function_2 = swf.FLsweep(lm=flux_lm, par=flux_lm.parameters[f"vcz_amp_fine_{gate_direction}"], waveform_name=f"cz_{gate_direction}", # bypass_waveform_upload=True, @@ -5597,16 +5259,16 @@ def calibrate_snz_fine_landscape( log.info(f"Flux codeword: {flux_codeword}, flux dance steps: {flux_dance_steps}") if adaptive_target_cost is not None: - # target cost value can be computed by: + # target cost value can be computed by: # target_cost = cf.parity_check_cost( - # phase_diff=185, - # phase_weight=0.5, + # phase_diff=185, + # phase_weight=0.5, # missing_fraction=0.02) # convergence threshold strangely has to be given in loss function, not here - goal = lndm.mk_min_threshold_goal_func(max_pnts_beyond_threshold=2) + goal = lndm.mk_min_threshold_goal_func(max_pnts_beyond_threshold=2) else: goal = lndm.mk_minimization_goal_func() - + loss = lndm.mk_minimization_loss_func( max_no_improve_in_local=6, converge_below=adaptive_target_cost, @@ -5642,7 +5304,7 @@ def calibrate_snz_fine_landscape( log.info(f"Optimization result: {result['opt_res']}") log.info(f"A = {flux_lm.parameters[f'vcz_amp_sq_{gate_direction}']()}," f"B = {flux_lm.parameters[f'vcz_amp_fine_{gate_direction}']()}") - + if update: if horizontal_calibration: # Heatmap analysis currently doesn't work for msmt format of horizontal calibration @@ -5691,7 +5353,7 @@ def calibrate_snz_fine_landscape( self.measure_parity_check_flux_dance( target_qubits=[pair[0]], control_qubits=[pair[1]], - ramsey_qubits=ramsey_qubits, + ramsey_qubits=ramsey_qubits, flux_dance_steps=flux_dance_steps, flux_codeword=flux_codeword, 
prepare_for_timedomain=True, @@ -5710,7 +5372,7 @@ def calibrate_snz_fine_landscape( log.error(logging.traceback.format_exc()) def measure_vcz_A_B_landscape( - self, + self, Q0, Q1, A_ranges, @@ -5719,8 +5381,8 @@ def measure_vcz_A_B_landscape( Q_parks: list = None, flux_codeword: str = 'cz'): """ - Perform 2D sweep of amplitude and wave parameter while measuring - conditional phase and missing fraction via the "conditional + Perform 2D sweep of amplitude and wave parameter while measuring + conditional phase and missing fraction via the "conditional oscillation" experiment. Q0 : High frequency qubit(s). Can be given as single qubit or list. @@ -5740,7 +5402,7 @@ def measure_vcz_A_B_landscape( nested_MC = self.instr_nested_MC.get_instr() # get gate directions directions = [get_gate_directions(q0, q1) for q0, q1 in zip(Q0, Q1)] - + # Time-domain preparation # Prepare for time domain self.prepare_for_timedomain( @@ -5765,7 +5427,7 @@ def wrapper(Q0, Q1, extract_only, disable_metadata): a = self.measure_conditional_oscillation_multi( - pairs=[[Q0[i], Q1[i]] for i in range(len(Q0))], + pairs=[[Q0[i], Q1[i]] for i in range(len(Q0))], parked_qbs=Q_parks, flux_codeword=flux_codeword, prepare_for_timedomain=prepare_for_timedomain, @@ -5777,8 +5439,8 @@ def wrapper(Q0, Q1, for i in range(len(Q0)) } mf = { f'missing_fraction_{i+1}' : a[f'pair_{i+1}_missing_frac_a']\ for i in range(len(Q0)) } - return { **cp, **mf} - + return { **cp, **mf} + d = det.Function_Detector( wrapper, msmt_kw={'Q0' : Q0, 'Q1' : Q1, @@ -5816,7 +5478,7 @@ def wrapper(Q0, Q1, def measure_vcz_A_tmid_landscape( - self, + self, Q0, Q1, T_mids, @@ -5826,8 +5488,8 @@ def measure_vcz_A_tmid_landscape( Tp : float = None, flux_codeword: str = 'cz'): """ - Perform 2D sweep of amplitude and wave parameter while measuring - conditional phase and missing fraction via the "conditional + Perform 2D sweep of amplitude and wave parameter while measuring + conditional phase and missing fraction via the "conditional 
oscillation" experiment. Q0 : High frequency qubit(s). Can be given as single qubit or list. @@ -5879,7 +5541,7 @@ def wrapper(Q0, Q1, extract_only, disable_metadata): a = self.measure_conditional_oscillation_multi( - pairs=[[Q0[i], Q1[i]] for i in range(len(Q0))], + pairs=[[Q0[i], Q1[i]] for i in range(len(Q0))], parked_qbs=Q_parks, flux_codeword=flux_codeword, prepare_for_timedomain=prepare_for_timedomain, @@ -5891,7 +5553,7 @@ def wrapper(Q0, Q1, for i in range(len(Q0)) } mf = { f'missing_fraction_{i+1}' : a[f'pair_{i+1}_missing_frac_a']\ for i in range(len(Q0)) } - return { **cp, **mf} + return { **cp, **mf} d = det.Function_Detector( wrapper, @@ -5915,7 +5577,7 @@ def wrapper(Q0, Q1, n_points=A_points) swf2 = swf.flux_t_middle_sweep( fl_lm_tm = list(np.array([[Flux_lm_0[i], Flux_lm_1[i] ]\ - for i in range(len(Q0))]).flatten()), + for i in range(len(Q0))]).flatten()), fl_lm_park = Flux_lms_park, which_gate = list(np.array(directions).flatten()), t_pulse = Tp) diff --git a/pycqed/measurement/openql_experiments/clifford_rb_oql.py b/pycqed/measurement/openql_experiments/clifford_rb_oql.py index e2529aa76b..4c9ed7dd11 100644 --- a/pycqed/measurement/openql_experiments/clifford_rb_oql.py +++ b/pycqed/measurement/openql_experiments/clifford_rb_oql.py @@ -5,9 +5,16 @@ import logging import numpy as np from importlib import reload +import multiprocessing +# import dill # FIXME: work on wrapping OqlProgram in dill + +from typing import List, Dict + from pycqed.measurement.randomized_benchmarking import randomized_benchmarking as rb from pycqed.measurement.openql_experiments.openql_helpers import OqlProgram +# import pycqed.measurement.openql_experiments.openql_helpers as oqh +# OqlProgram = oqh.OqlProgram from pycqed.measurement.randomized_benchmarking.two_qubit_clifford_group import ( SingleQubitClifford, TwoQubitClifford, @@ -28,30 +35,31 @@ maxtasksperchild = 4 -def parallel_friendly_rb(rb_kw_dict): - """ - A wrapper around `randomized_benchmarking` such that we 
collect only - the filenames of the resulting programs that can be communicated back to - the main process when parallelizing the compilation using the python - multiprocessing capabilities. - """ - p = randomized_benchmarking(**rb_kw_dict) - - return p.filename - -def parallel_friendly_rb_2(rb_kw_dict): - """ - A wrapper around `randomized_benchmarking` such that we collect only - the filenames of the resulting programs that can be communicated back to - the main process when parallelizing the compilation using the python - multiprocessing capabilities. - """ - p = two_qubit_randomized_benchmarking(**rb_kw_dict) - - return p.filename - - -def wait_for_rb_tasks(rb_tasks, refresh_rate: float = 4): +# FIXME +# def parallel_friendly_rb(rb_kw_dict): +# """ +# A wrapper around `randomized_benchmarking` such that we collect only +# the filenames of the resulting programs that can be communicated back to +# the main process when parallelizing the compilation using the python +# multiprocessing capabilities. +# """ +# p = randomized_benchmarking(**rb_kw_dict) +# +# return p.filename +# +# def parallel_friendly_rb_2(rb_kw_dict): +# """ +# A wrapper around `randomized_benchmarking` such that we collect only +# the filenames of the resulting programs that can be communicated back to +# the main process when parallelizing the compilation using the python +# multiprocessing capabilities. +# """ +# p = two_qubit_randomized_benchmarking(**rb_kw_dict) +# +# return p.filename + + +def wait_for_rb_tasks(rb_tasks, refresh_interval: float = 4): """ Blocks the main process till all tasks in `rb_tasks` are done """ @@ -61,6 +69,7 @@ def wait_for_rb_tasks(rb_tasks, refresh_rate: float = 4): # it is an internal number of groups of compilation tasks (chunks) # It is enough to have an indication of progress without # compromising the efficiency + # FIXME: uses internal private data structure print( "{} RB compilation tasks left." 
" Elapsed waiting {:>7.1f}s".format( @@ -71,14 +80,68 @@ def wait_for_rb_tasks(rb_tasks, refresh_rate: float = 4): # check for keyboard interrupt because generating can be slow check_keyboard_interrupt() - time.sleep(refresh_rate) + time.sleep(refresh_interval) print("\nDone compiling RB sequences!") +# FIXME: WIP on runner. Move to separate file +def _wrap_result_with_dill(func, parameters: Dict): + # return dill.dumps(func(**parameters)) + return func(**parameters) + + # print(f"Starting processing of function {func}") + # d = dill.dumps(func(**parameters)) + # print("Processing done") + # return None + +def run_tasks(func, parameter_list: List[Dict], parallel: bool=False): + ret = [] + if not parallel: + i = 0 + for parameters in parameter_list: + name = parameters["program_name"] # FIXME: assumes that program_name exists + log.info(f"Executing task {i}: '{name}'") + ret.append(func(**parameters)) + # ret.append(func(parameters)) # assumes wrapper + # print(f"par[{i}] = {parameters}") + i += 1 + else: + raise NotImplementedError("parallel compilation is still Work In Progress") + + with multiprocessing.Pool( + processes=4, # FIXME: get from global + maxtasksperchild=maxtasksperchild + ) as pool: + # FIXME: testing dill + # print("wrapping OqlProgram") + # p = OqlProgram + # dill.dumps(p) + # print("wrapping done") + + log.info(f"Asynchronously starting {len(parameter_list)} tasks") + async_result_list = [] + for parameters in parameter_list: + async_result = pool.apply_async(_wrap_result_with_dill, [func, parameters]) + async_result_list.append(async_result) + + for async_result in async_result_list: + # retrieve result of asynchronous function call, catching errors to ease debugging + try: + result_dill = async_result.get() + except Exception as e: + log.error(f"Asynchronous function call returned '{e}'") + raise + + # result = dill.loads(result_dill) + result = result_dill + ret.append(result) + + return ret + def randomized_benchmarking( - qubits: list, 
platf_cfg: str, + qubits: list, nr_cliffords, nr_seeds: int, net_cliffords: list = [0], @@ -99,13 +162,13 @@ def randomized_benchmarking( # FIXME: split into separate functions for different types of RB """ Input pars: + platf_cfg: + filename of the platform config file + qubits: list of ints specifying qubit indices. based on the length this function detects if it should generate a single or two or multi qubit RB sequence. - platf_cfg: - filename of the platform config file - nr_cliffords: list nr_cliffords for which to generate RB seqs @@ -501,183 +564,183 @@ def randomized_benchmarking( return p -# def two_qubit_randomized_benchmarking( -# two_qubit_pair: list, -# single_qubits: list, -# platf_cfg: str, -# nr_cliffords, -# nr_seeds: int, -# two_qubit_net_cliffords: list = [0], -# single_qubit_net_cliffords: list = [0], -# max_clifford_idx: int = 11520, -# flux_codeword: str = "cz", -# flux_allocated_duration_ns: int = None, -# interleaving_cliffords=[None], -# program_name: str = "randomized_benchmarking", -# cal_points: bool = True, -# f_state_cal_pts: bool = True, -# recompile: bool = True, -# ): - -# assert len(two_qubit_net_cliffords) == len(single_qubit_net_cliffords) - -# two_qubit_map = {f'q{i}' : qb for i, qb in enumerate(two_qubit_pair)} -# if single_qubits != None: -# single_qubit_map = {f'q{i}' : qb for i, qb in enumerate(single_qubits)} - -# p = oqh.create_program(program_name, platf_cfg) - -# this_file = inspect.getfile(inspect.currentframe()) - -# # Ensure that programs are recompiled when changing the code as well -# recompile_dict = oqh.check_recompilation_needed_hash_based( -# program_fn=p.filename, -# platf_cfg=platf_cfg, -# clifford_rb_oql=this_file, -# recompile=recompile, -# ) - -# if not recompile_dict["recompile"]: -# os.rename(recompile_dict["tmp_file"], recompile_dict["file"]) -# return p - -# if 100_000 in interleaving_cliffords and flux_allocated_duration_ns is None: -# # Try to get the flux duration from the cfg file -# with 
open(platf_cfg) as json_file: -# loaded_json = json.load(json_file) -# try: -# flux_allocated_duration_ns = loaded_json["instructions"]["sf_cz_se q0"][ -# "duration" -# ] -# except KeyError: -# raise ValueError("Could not find flux duration. Specify manually!") - -# for seed in range(nr_seeds): -# for j, n_cl in enumerate(nr_cliffords): -# for interleaving_cl in interleaving_cliffords: - -# # Generate 2-qubit sequence -# for net_clifford_2q, net_clifford_1q in zip(two_qubit_net_cliffords, single_qubit_net_cliffords): -# two_cl_seq = rb.randomized_benchmarking_sequence( -# n_cl, -# number_of_qubits=2, -# desired_net_cl=net_clifford_2q, -# max_clifford_idx=max_clifford_idx, -# interleaving_cl=interleaving_cl, -# ) -# net_two_cl_seq = rb.calculate_net_clifford(two_cl_seq, TwoQubitClifford) -# # decompose -# two_cl_seq_decomposed = [] -# for cl in two_cl_seq: -# # benchmarking only CZ (not as a member of CNOT group) -# if cl == 104368: # 104368 = 100_000 + CZ -# two_cl_seq_decomposed.append([("CZ", ["q0", "q1"])]) -# # benchmarking only idling identity, with duration of cz -# # see below where wait-time is added -# elif cl == 100_000: -# two_cl_seq_decomposed.append([("I", ["q0", "q1"])]) -# else: -# two_cl_seq_decomposed.append(TwoQubitClifford(cl).gate_decomposition) - -# # Generate single-qubit sequence -# if single_qubits != None: -# Single_cl_seq = {} -# net_Single_cl_seq = {} -# Single_cl_seq_decomposed = dict.fromkeys(single_qubits) -# for single_qubit in single_qubits: -# Single_cl_seq[single_qubit] = rb.randomized_benchmarking_sequence( -# n_cl, -# number_of_qubits=1, -# desired_net_cl=net_clifford_1q, -# max_clifford_idx=max_clifford_idx, -# ) -# net_Single_cl_seq[single_qubit] = rb.calculate_net_clifford(Single_cl_seq[single_qubit], SingleQubitClifford) -# Single_cl_seq_decomposed[single_qubit] = [] -# for cl in Single_cl_seq[single_qubit]: -# Single_cl_seq_decomposed[single_qubit].append(SingleQubitClifford(cl).gate_decomposition) - - -# # # generate OpenQL 
kernel for every net_clifford -# # for net_clifford in net_cliffords: -# # create decomposed sequence including recovery -# two_recovery_to_idx_clifford = net_two_cl_seq.get_inverse() -# two_recovery_clifford = TwoQubitClifford(net_clifford_2q) * two_recovery_to_idx_clifford -# two_cl_seq_decomposed_with_net = two_cl_seq_decomposed + [ -# two_recovery_clifford.gate_decomposition -# ] -# if single_qubits != None: -# for single_qubit in single_qubits: -# single_recovery_to_idx_clifford = net_Single_cl_seq[single_qubit].get_inverse() -# single_recovery_clifford = SingleQubitClifford(net_clifford_1q) * single_recovery_to_idx_clifford -# single_cl_seq_decomposed_with_net = Single_cl_seq_decomposed[single_qubit] + [ -# single_recovery_clifford.gate_decomposition -# ] - -# k = oqh.create_kernel( -# "RB_{}Cl_s{}_net{}_inter{}".format( -# int(n_cl), seed, net_clifford_2q, interleaving_cl -# ), -# p, -# ) -# for qubit_idx in two_qubit_map.values(): -# k.prepz(qubit_idx) -# if single_qubits != None: -# for qubit_idx in single_qubit_map.values(): -# k.prepz(qubit_idx) - -# print(two_cl_seq_decomposed_with_net) -# if single_qubits != None: -# print(single_cl_seq_decomposed_with_net) -# # print(len(two_cl_seq_decomposed_with_net), len(single_cl_seq_decomposed_with_net)) - -# for i, gates in enumerate(two_cl_seq_decomposed_with_net): - -# if i%2 == 0 and single_qubit != None: -# for g1, q1 in single_cl_seq_decomposed_with_net[i//2]: -# k.gate(g1, [single_qubit_map[q1]]) - -# for g, q in gates: -# if isinstance(q, str): # single qubit gate -# k.gate(g, [two_qubit_map[q]]) -# elif isinstance(q, list): # 2 qubit gate -# if g == "I": -# # interleaving an idling with the length of the CZ -# k.gate("wait", [], 0) # alignment -# k.gate("wait", [], flux_allocated_duration_ns) -# k.gate("wait", [], 0) -# else: -# k.gate("wait", [], 0) -# k.gate( -# flux_codeword, list(two_qubit_map.values()) -# ) # fix for QCC -# k.gate("wait", [], 0) -# # Measurement -# k.gate("wait", [], 0) -# for 
qubit_idx in two_qubit_map.values(): -# k.measure(qubit_idx) -# k.gate("wait", [], 0) -# p.add_kernel(k) - -# if cal_points: -# if f_state_cal_pts: -# combinations = ["00", "01", "10", "11", "02", "20", "22"] -# else: -# combinations = ["00", "01", "10", "11"] -# p = oqh.add_multi_q_cal_points( -# p, qubits=two_qubit_pair, combinations=combinations -# ) - -# p = oqh.compile(p) -# # Just before returning we rename the hashes file as an indication of the -# # integrity of the RB code -# os.rename(recompile_dict["tmp_file"], recompile_dict["file"]) -# return p +def two_qubit_randomized_benchmarking_original( + platf_cfg: str, + two_qubit_pair: list, + single_qubits: list, + nr_cliffords, + nr_seeds: int, + two_qubit_net_cliffords: list = [0], + single_qubit_net_cliffords: list = [0], + max_clifford_idx: int = 11520, + flux_codeword: str = "cz", + flux_allocated_duration_ns: int = None, + interleaving_cliffords=[None], + program_name: str = "randomized_benchmarking", + cal_points: bool = True, + f_state_cal_pts: bool = True, + recompile: bool = True, +): + + assert len(two_qubit_net_cliffords) == len(single_qubit_net_cliffords) + + two_qubit_map = {f'q{i}' : qb for i, qb in enumerate(two_qubit_pair)} + if single_qubits != None: + single_qubit_map = {f'q{i}' : qb for i, qb in enumerate(single_qubits)} + + p = oqh.create_program(program_name, platf_cfg) + + this_file = inspect.getfile(inspect.currentframe()) + + # Ensure that programs are recompiled when changing the code as well + recompile_dict = oqh.check_recompilation_needed_hash_based( + program_fn=p.filename, + platf_cfg=platf_cfg, + clifford_rb_oql=this_file, + recompile=recompile, + ) + + if not recompile_dict["recompile"]: + os.rename(recompile_dict["tmp_file"], recompile_dict["file"]) + return p + + if 100_000 in interleaving_cliffords and flux_allocated_duration_ns is None: + # Try to get the flux duration from the cfg file + with open(platf_cfg) as json_file: + loaded_json = json.load(json_file) + try: + 
flux_allocated_duration_ns = loaded_json["instructions"]["sf_cz_se q0"][ + "duration" + ] + except KeyError: + raise ValueError("Could not find flux duration. Specify manually!") + + for seed in range(nr_seeds): + for j, n_cl in enumerate(nr_cliffords): + for interleaving_cl in interleaving_cliffords: + + # Generate 2-qubit sequence + for net_clifford_2q, net_clifford_1q in zip(two_qubit_net_cliffords, single_qubit_net_cliffords): + two_cl_seq = rb.randomized_benchmarking_sequence( + n_cl, + number_of_qubits=2, + desired_net_cl=net_clifford_2q, + max_clifford_idx=max_clifford_idx, + interleaving_cl=interleaving_cl, + ) + net_two_cl_seq = rb.calculate_net_clifford(two_cl_seq, TwoQubitClifford) + # decompose + two_cl_seq_decomposed = [] + for cl in two_cl_seq: + # benchmarking only CZ (not as a member of CNOT group) + if cl == 104368: # 104368 = 100_000 + CZ + two_cl_seq_decomposed.append([("CZ", ["q0", "q1"])]) + # benchmarking only idling identity, with duration of cz + # see below where wait-time is added + elif cl == 100_000: + two_cl_seq_decomposed.append([("I", ["q0", "q1"])]) + else: + two_cl_seq_decomposed.append(TwoQubitClifford(cl).gate_decomposition) + + # Generate single-qubit sequence + if single_qubits != None: + Single_cl_seq = {} + net_Single_cl_seq = {} + Single_cl_seq_decomposed = dict.fromkeys(single_qubits) + for single_qubit in single_qubits: + Single_cl_seq[single_qubit] = rb.randomized_benchmarking_sequence( + n_cl, + number_of_qubits=1, + desired_net_cl=net_clifford_1q, + max_clifford_idx=max_clifford_idx, + ) + net_Single_cl_seq[single_qubit] = rb.calculate_net_clifford(Single_cl_seq[single_qubit], SingleQubitClifford) + Single_cl_seq_decomposed[single_qubit] = [] + for cl in Single_cl_seq[single_qubit]: + Single_cl_seq_decomposed[single_qubit].append(SingleQubitClifford(cl).gate_decomposition) + + + # # generate OpenQL kernel for every net_clifford + # for net_clifford in net_cliffords: + # create decomposed sequence including recovery + 
two_recovery_to_idx_clifford = net_two_cl_seq.get_inverse() + two_recovery_clifford = TwoQubitClifford(net_clifford_2q) * two_recovery_to_idx_clifford + two_cl_seq_decomposed_with_net = two_cl_seq_decomposed + [ + two_recovery_clifford.gate_decomposition + ] + if single_qubits != None: + for single_qubit in single_qubits: + single_recovery_to_idx_clifford = net_Single_cl_seq[single_qubit].get_inverse() + single_recovery_clifford = SingleQubitClifford(net_clifford_1q) * single_recovery_to_idx_clifford + single_cl_seq_decomposed_with_net = Single_cl_seq_decomposed[single_qubit] + [ + single_recovery_clifford.gate_decomposition + ] + + k = oqh.create_kernel( + "RB_{}Cl_s{}_net{}_inter{}".format( + int(n_cl), seed, net_clifford_2q, interleaving_cl + ), + p, + ) + for qubit_idx in two_qubit_map.values(): + k.prepz(qubit_idx) + if single_qubits != None: + for qubit_idx in single_qubit_map.values(): + k.prepz(qubit_idx) + + print(two_cl_seq_decomposed_with_net) + if single_qubits != None: + print(single_cl_seq_decomposed_with_net) + # print(len(two_cl_seq_decomposed_with_net), len(single_cl_seq_decomposed_with_net)) + + for i, gates in enumerate(two_cl_seq_decomposed_with_net): + + if i%2 == 0 and single_qubit != None: + for g1, q1 in single_cl_seq_decomposed_with_net[i//2]: + k.gate(g1, [single_qubit_map[q1]]) + + for g, q in gates: + if isinstance(q, str): # single qubit gate + k.gate(g, [two_qubit_map[q]]) + elif isinstance(q, list): # 2 qubit gate + if g == "I": + # interleaving an idling with the length of the CZ + k.barrier() # alignment + k.gate("wait", [], flux_allocated_duration_ns) + k.barrier() + else: + k.barrier() + k.gate( + flux_codeword, list(two_qubit_map.values()) + ) # fix for QCC + k.barrier() + # Measurement + k.barrier() + for qubit_idx in two_qubit_map.values(): + k.measure(qubit_idx) + k.barrier() + p.add_kernel(k) + + if cal_points: + if f_state_cal_pts: + combinations = ["00", "01", "10", "11", "02", "20", "22"] + else: + combinations = ["00", 
"01", "10", "11"] + p = oqh.add_multi_q_cal_points( + p, qubits=two_qubit_pair, combinations=combinations + ) + + p = oqh.compile(p) + # Just before returning we rename the hashes file as an indication of the + # integrity of the RB code + os.rename(recompile_dict["tmp_file"], recompile_dict["file"]) + return p def two_qubit_randomized_benchmarking( + platf_cfg: str, two_qubit_pair: list, single_qubits: list, - platf_cfg: str, nr_cliffords, nr_seeds: int, two_qubit_net_cliffords: list = [0], @@ -690,7 +753,7 @@ def two_qubit_randomized_benchmarking( cal_points: bool = True, f_state_cal_pts: bool = True, recompile: bool = True, -): +) -> OqlProgram: assert len(two_qubit_net_cliffords) == len(single_qubit_net_cliffords) @@ -778,7 +841,7 @@ def two_qubit_randomized_benchmarking( two_recovery_clifford.gate_decomposition ] # Jorge 6-4-2022: Fixme, recovery clifford for simultaneous - # single qubit RB of spectators is not working. + # single qubit RB of spectators is not working. # if single_qubits != None: # for sq in single_qubits: # single_recovery_to_idx_clifford = net_Single_cl_seq[sq].get_inverse() @@ -804,7 +867,7 @@ def two_qubit_randomized_benchmarking( # print(len(two_cl_seq_decomposed_with_net), len(single_cl_seq_decomposed_with_net)) for i, gates in enumerate(two_cl_seq_decomposed_with_net): - + if i%2 == 0 and single_qubits != None: for sq in single_qubits: for g1, q1 in Single_cl_seq_decomposed[sq][i//2]: @@ -816,21 +879,21 @@ def two_qubit_randomized_benchmarking( elif isinstance(q, list): # 2 qubit gate if g == "I": # interleaving an idling with the length of the CZ - k.gate("wait", [], 0) # alignment + k.barrier() # alignment k.gate("wait", [], flux_allocated_duration_ns) - k.gate("wait", [], 0) + k.barrier() else: - k.gate("wait", [], 0) + k.barrier() k.gate( flux_codeword, list(two_qubit_map.values()) ) # fix for QCC - k.gate("wait", [], 0) + k.barrier() # Measurement - k.gate("wait", [], 0) + k.barrier() for qubit_idx in two_qubit_map.values(): 
k.measure(qubit_idx) - k.gate("wait", [], 0) + k.barrier() p.add_kernel(k) if cal_points: @@ -838,7 +901,7 @@ def two_qubit_randomized_benchmarking( combinations = ["00", "01", "10", "11", "02", "20", "22"] else: combinations = ["00", "01", "10", "11"] - + p.add_multi_q_cal_points( qubits=two_qubit_pair, combinations=combinations ) diff --git a/pycqed/measurement/openql_experiments/generate_CC_cfg.py b/pycqed/measurement/openql_experiments/generate_CC_cfg.py index aa213bc5cb..edd221e6c5 100644 --- a/pycqed/measurement/openql_experiments/generate_CC_cfg.py +++ b/pycqed/measurement/openql_experiments/generate_CC_cfg.py @@ -10,8 +10,10 @@ from pathlib import Path from datetime import datetime +from deprecated import deprecated +@deprecated(version='0.4', reason='please use pycqed.measurement.openql_experiments.generate_CC_cfg_modular.generate_config_modular') def generate_config(out_filename: str, mw_pulse_duration: int = 20, flux_pulse_duration: int = 40, diff --git a/pycqed/measurement/openql_experiments/openql_helpers.py b/pycqed/measurement/openql_experiments/openql_helpers.py index f7f11acc67..8d5673da1a 100644 --- a/pycqed/measurement/openql_experiments/openql_helpers.py +++ b/pycqed/measurement/openql_experiments/openql_helpers.py @@ -949,7 +949,7 @@ def check_recompilation_needed_hash_based( raise DeprecationWarning("use OqlProgram.check_recompilation_needed_hash_based") -@deprecated(reason="Use `check_recompilation_needed_hash_based`!") +@deprecated(reason="Use `OqlProgram.check_recompilation_needed_hash_based`!") def check_recompilation_needed( program_fn: str, platf_cfg: str, diff --git a/pycqed/measurement/randomized_benchmarking/clifford_decompositions.py b/pycqed/measurement/randomized_benchmarking/clifford_decompositions.py index 3d6ed487d1..d63f42556c 100644 --- a/pycqed/measurement/randomized_benchmarking/clifford_decompositions.py +++ b/pycqed/measurement/randomized_benchmarking/clifford_decompositions.py @@ -79,5 +79,5 @@ for i in range(3-len(el)): 
el.append('I') -# assigning to this variable for legacy reasons +# FIXME: assigning to this variable for legacy reasons gate_decomposition = epstein_efficient_decomposition diff --git a/pycqed/measurement/randomized_benchmarking/clifford_group.py b/pycqed/measurement/randomized_benchmarking/clifford_group.py index 41398fb27d..247dd84f30 100644 --- a/pycqed/measurement/randomized_benchmarking/clifford_group.py +++ b/pycqed/measurement/randomized_benchmarking/clifford_group.py @@ -1,5 +1,5 @@ import numpy as np -from pycqed.simulations.pauli_transfer_matrices import I, X, Y, Z, S, S2, H, CZ +from pycqed.simulations.pauli_transfer_matrices import I, X, Y, Z, S, S2, H #, CZ ''' Decomposition of the single qubit clifford group as per Epstein et al. Phys. Rev. A 89, 062321 (2014) diff --git a/pycqed/measurement/randomized_benchmarking/generate_clifford_hash_tables.py b/pycqed/measurement/randomized_benchmarking/generate_clifford_hash_tables.py index 2d25073971..7fd54fad7d 100644 --- a/pycqed/measurement/randomized_benchmarking/generate_clifford_hash_tables.py +++ b/pycqed/measurement/randomized_benchmarking/generate_clifford_hash_tables.py @@ -1,10 +1,11 @@ -from pycqed.measurement.randomized_benchmarking.two_qubit_clifford_group \ - import SingleQubitClifford, TwoQubitClifford from os.path import join, dirname, abspath from os import mkdir import numpy as np from zlib import crc32 +from pycqed.measurement.randomized_benchmarking.two_qubit_clifford_group import SingleQubitClifford, TwoQubitClifford + + output_dir = join(abspath(dirname(__file__)), 'clifford_hash_tables') try: mkdir(output_dir) @@ -24,14 +25,12 @@ def construct_clifford_lookuptable(generator, indices): def generate_hash_tables(): print("Generating Clifford hash tables.") - single_qubit_hash_lut = construct_clifford_lookuptable( - SingleQubitClifford, np.arange(24)) + single_qubit_hash_lut = construct_clifford_lookuptable(SingleQubitClifford, np.arange(24)) with open(join(output_dir, 
'single_qubit_hash_lut.txt'), 'w') as f: for h in single_qubit_hash_lut: f.write(str(h)+'\n') - two_qubit_hash_lut = construct_clifford_lookuptable( - TwoQubitClifford, np.arange(11520)) + two_qubit_hash_lut = construct_clifford_lookuptable(TwoQubitClifford, np.arange(11520)) with open(join(output_dir, 'two_qubit_hash_lut.txt'), 'w') as f: for h in two_qubit_hash_lut: f.write(str(h)+'\n') diff --git a/pycqed/measurement/randomized_benchmarking/randomized_benchmarking.py b/pycqed/measurement/randomized_benchmarking/randomized_benchmarking.py index 95117d7e08..45c21d9da6 100644 --- a/pycqed/measurement/randomized_benchmarking/randomized_benchmarking.py +++ b/pycqed/measurement/randomized_benchmarking/randomized_benchmarking.py @@ -15,13 +15,17 @@ def calculate_net_clifford( Calculate the net-clifford from a list of cliffords indices. Args: - rb_clifford_indices: list or array of integers specifying the cliffords. - Cliff : Clifford object used to determine what + rb_clifford_indices: + list or array of integers specifying the cliffords. + + Cliff: + Clifford object used to determine what inversion technique to use and what indices are valid. Valid choices are `SingleQubitClifford` and `TwoQubitClifford` Returns: - net_clifford: a `Clifford` object containing the net-clifford. + net_clifford: + a `Clifford` object containing the net-clifford. the Clifford index is contained in the Clifford.idx attribute. Note: the order corresponds to the order in a pulse sequence but is @@ -35,7 +39,7 @@ def calculate_net_clifford( # used to treat CZ as CZ and not the member of CNOT-like set of gates # Using negative sign convention (i.e. 
`-4368` for the interleaved CZ) # was a bad choice because there is no such thing as negative zero and - # the clifford numer 0 is the identity that is necessary for + # the clifford number 0 is the identity that is necessary for # benchmarking an idling identity with the same duration as the time # allocated to the flux pulses, for example # cliff = Clifford(abs(idx)) # Deprecated! @@ -60,6 +64,82 @@ def calculate_net_clifford( return net_clifford +############################################################################## +# New style RB sequences (using the hash-table method) compatible +# with Clifford object. +# More advanced sequences are available using this method. +############################################################################## + +def randomized_benchmarking_sequence( + n_cl: int, + desired_net_cl: int = 0, + number_of_qubits: int = 1, + max_clifford_idx: int = 11520, + interleaving_cl: int = None, + seed: int = None, +) -> np.ndarray: + """ + Generates a randomized benchmarking sequence for the one or two qubit + clifford group. + + Args: + n_cl (int) : number of Cliffords + desired_net_cl (int) : idx of the desired net clifford, if None is + specified no recovery Clifford is calculated + number_of_qubits(int): used to determine if Cliffords are drawn + from the single qubit or two qubit clifford group. + max_clifford_idx (int): used to set the index of the highest random + clifford generated. Useful to generate e.g., simultaneous two + qubit RB sequences. + FIXME: seems useless, because none of the callers set this for real, and we trim it to the group size + interleaving_cl (int): interleaves the sequence with a specific + clifford if desired + seed (int) : seed used to initialize the random number + generator. + Returns: + list of clifford indices (ints) + + N.B. 
in the case of the 1 qubit clifford group this function does the + same as "randomized_benchmarking_sequence_old" but + does not use the 24 by 24 lookuptable method to calculate the + net clifford. It instead uses the "Clifford" objects used in + constructing the two qubit Clifford classes. + The old method exists to establish the equivalence between the two methods. + + """ + + if number_of_qubits == 1: + Cl = SingleQubitClifford + group_size = np.min([24, max_clifford_idx]) + elif number_of_qubits == 2: + Cl = TwoQubitClifford + group_size = np.min([11520, max_clifford_idx]) + else: + raise NotImplementedError() + + # Generate a random sequence of Cliffords + # Even if no seed is provided make sure we pick a new state such that + # it is safe to run generate and compile the random sequences in + # parallel using multiprocess + rng_seed = np.random.RandomState(seed) + rb_clifford_indices = rng_seed.randint(0, group_size, int(n_cl)) + + # Add interleaving cliffords if applicable + if interleaving_cl is not None: + rb_clif_ind_intl = np.empty(rb_clifford_indices.size * 2, dtype=int) + rb_clif_ind_intl[0::2] = rb_clifford_indices + rb_clif_ind_intl[1::2] = interleaving_cl + rb_clifford_indices = rb_clif_ind_intl + + if desired_net_cl is not None: + # Calculate the net clifford + net_clifford = calculate_net_clifford(rb_clifford_indices, Cl) + + # determine the inverse of the sequence + recovery_to_idx_clifford = net_clifford.get_inverse() + recovery_clifford = Cl(desired_net_cl) * recovery_to_idx_clifford + rb_clifford_indices = np.append(rb_clifford_indices, recovery_clifford.idx) + return rb_clifford_indices # FIXME: deprecate along with randomized_benchmarking_sequence_old() def calculate_recovery_clifford(cl_in, desired_cl=0): @@ -138,81 +218,3 @@ def randomized_benchmarking_sequence_old( rb_cliffords = np.append(rb_cliffords, recovery_clifford) return rb_cliffords - - -############################################################################## -# New style RB 
sequences (using the hash-table method) compatible -# with Clifford object. -# More advanced sequences are available using this method. -############################################################################## - -def randomized_benchmarking_sequence( - n_cl: int, - desired_net_cl: int = 0, - number_of_qubits: int = 1, - max_clifford_idx: int = 11520, - interleaving_cl: int = None, - seed: int = None, -) -> np.ndarray: - """ - Generates a randomized benchmarking sequence for the one or two qubit - clifford group. - - Args: - n_cl (int) : number of Cliffords - desired_net_cl (int) : idx of the desired net clifford, if None is - specified no recovery Clifford is calculated - number_of_qubits(int): used to determine if Cliffords are drawn - from the single qubit or two qubit clifford group. - max_clifford_idx (int): used to set the index of the highest random - clifford generated. Useful to generate e.g., simultaneous two - qubit RB sequences. - FIXME: seems useless, because none of the callers set this for real, and we trim it to the group size - interleaving_cl (int): interleaves the sequence with a specific - clifford if desired - seed (int) : seed used to initialize the random number - generator. - Returns: - list of clifford indices (ints) - - N.B. in the case of the 1 qubit clifford group this function does the - same as "randomized_benchmarking_sequence_old" but - does not use the 24 by 24 lookuptable method to calculate the - net clifford. It instead uses the "Clifford" objects used in - constructing the two qubit Clifford classes. - The old method exists to establish the equivalence between the two methods. 
- - """ - - if number_of_qubits == 1: - Cl = SingleQubitClifford - group_size = np.min([24, max_clifford_idx]) - elif number_of_qubits == 2: - Cl = TwoQubitClifford - group_size = np.min([11520, max_clifford_idx]) - else: - raise NotImplementedError() - - # Generate a random sequence of Cliffords - # Even if no seed is provided make sure we pick a new state such that - # it is safe to run generate and compile the random sequences in - # parallel using multiprocess - rng_seed = np.random.RandomState(seed) - rb_clifford_indices = rng_seed.randint(0, group_size, int(n_cl)) - - # Add interleaving cliffords if applicable - if interleaving_cl is not None: - rb_clif_ind_intl = np.empty(rb_clifford_indices.size * 2, dtype=int) - rb_clif_ind_intl[0::2] = rb_clifford_indices - rb_clif_ind_intl[1::2] = interleaving_cl - rb_clifford_indices = rb_clif_ind_intl - - if desired_net_cl is not None: - # Calculate the net clifford - net_clifford = calculate_net_clifford(rb_clifford_indices, Cl) - - # determine the inverse of the sequence - recovery_to_idx_clifford = net_clifford.get_inverse() - recovery_clifford = Cl(desired_net_cl) * recovery_to_idx_clifford - rb_clifford_indices = np.append(rb_clifford_indices, recovery_clifford.idx) - return rb_clifford_indices diff --git a/pycqed/measurement/randomized_benchmarking/two_qubit_clifford_group.py b/pycqed/measurement/randomized_benchmarking/two_qubit_clifford_group.py index 573c8ffcd2..82282281ef 100644 --- a/pycqed/measurement/randomized_benchmarking/two_qubit_clifford_group.py +++ b/pycqed/measurement/randomized_benchmarking/two_qubit_clifford_group.py @@ -1,106 +1,63 @@ +""" +FIXME: handling of cliffords is spread all over the place: +- this file provides classes Clifford, SingleQubitClifford and TwoQubitClifford +- pycqed.measurement.randomized_benchmarking.clifford_group defines clifford_group_single_qubit, and generate_clifford_lookuptable +- pycqed.measurement.randomized_benchmarking.generate_clifford_hash_tables provides 
generate_hash_tables +- pycqed.measurement.randomized_benchmarking.clifford_decompositions provides decompositions +- pycqed.simulations.pauli_transfer_matrices provides transfer matrices + +And then there are hardcoded Clifford IDs and group sizes everywhere +""" + import numpy as np from zlib import crc32 from os.path import join, dirname, abspath -from pycqed.measurement.randomized_benchmarking.clifford_group import clifford_group_single_qubit as C1, CZ, S1 -from pycqed.measurement.randomized_benchmarking.clifford_decompositions import epstein_efficient_decomposition - -hash_dir = join(abspath(dirname(__file__)), 'clifford_hash_tables') - -""" -This file contains Clifford decompositions for the two qubit Clifford group. - -The Clifford decomposition closely follows two papers: -Corcoles et al. Process verification ... Phys. Rev. A. 2013 - http://journals.aps.org/pra/pdf/10.1103/PhysRevA.87.030301 -for the different classes of two-qubit Cliffords. - -and -Barends et al. Superconducting quantum circuits at the ... Nature 2014 - https://www.nature.com/articles/nature13171?lang=en -for writing the cliffords in terms of CZ gates. - - -########################################################################### -2-qubit clifford decompositions - -The two qubit clifford group (C2) consists of 11520 two-qubit cliffords -These gates can be subdivided into four classes. - 1. The Single-qubit like class | 576 elements (24^2) - 2. The CNOT-like class | 5184 elements (24^2 * 3^2) - 3. The iSWAP-like class | 5184 elements (24^2 * 3^2) - 4. The SWAP-like class | 576 elements (24^2) - --------------------------------|------------- + - Two-qubit Clifford group C2 | 11520 elements - - -1. The Single-qubit like class - -- C1 -- - -- C1 -- - -2. The CNOT-like class - --C1--•--S1-- --C1--•--S1------ - | -> | - --C1--⊕--S1-- --C1--•--S1^Y90-- - -3. The iSWAP-like class - --C1--*--S1-- --C1--•---Y90--•--S1^Y90-- - | -> | | - --C1--*--S1-- --C1--•--mY90--•--S1^X90-- -4. 
The SWAP-like class - --C1--x-- --C1--•-mY90--•--Y90--•------- - | -> | | | - --C1--x-- --C1--•--Y90--•-mY90--•--Y90-- - -C1: element of the single qubit Clifford group - N.B. we use the decomposition defined in Epstein et al. here - -S1: element of the S1 group, a subgroup of the single qubit Clifford group - -S1[0] = I -S1[1] = rY90, rX90 -S1[2] = rXm90, rYm90 +from pycqed.measurement.randomized_benchmarking.clifford_group import clifford_group_single_qubit as C1 # the full group +from pycqed.measurement.randomized_benchmarking.clifford_group import S1 # the S1 subgroup of C1 +from pycqed.simulations.pauli_transfer_matrices import CZ +from pycqed.measurement.randomized_benchmarking.clifford_decompositions import epstein_efficient_decomposition -Important clifford indices: - I : Cl 0 - X90 : Cl 16 - Y90 : Cl 21 - X180 : Cl 3 - Y180 : Cl 6 - Z180 : Cl 9 - CZ : 4368 +hash_dir = join(abspath(dirname(__file__)), 'clifford_hash_tables') -""" -# set as a module wide variable instead of argument to function for speed -# reasons +# set as a module wide variable instead of argument to function for speed reasons gate_decomposition = epstein_efficient_decomposition -# used to transform the S1 subgroup +# matrices used to transform the S1 subgroup +# FIXME: handle indices vs name in single place only X90 = C1[16] Y90 = C1[21] mY90 = C1[15] # A dict containing clifford IDs with common names. -common_cliffords = {'I': 0, 'X': 3, 'Y': 6, 'Z': 9, - 'II': 0, 'IX': 3, 'IY': 6, 'IZ': 9, +# FIXME: should be separate per Clifford class. 
Hardly used +# FIXME: handle indices vs name in single place only +common_cliffords = { + # in SingleQubitClifford: + 'I': 0, 'X': 3, 'Y': 6, 'Z': 9, - 'XI': 24*3 + 0, 'XX': 24*3 + 3, - 'XY': 24*3 + 6, 'XZ': 24*3 + 9, + 'X90': 16, + 'Y90': 21, + 'X180': 3, + 'Y180': 6, + 'Z180': 9, - 'YI': 24*6 + 0, 'YX': 24*6 + 3, - 'YY': 24*6 + 6, 'YZ': 24*6 + 9, + # in TwoQubitClifford: + 'II': 0, 'IX': 3, 'IY': 6, 'IZ': 9, - 'ZI': 24*9 + 0, 'ZX': 24*9 + 3, - 'ZY': 24*9 + 6, 'ZZ': 24*9 + 9, + 'XI': 24*3 + 0, 'XX': 24*3 + 3, + 'XY': 24*3 + 6, 'XZ': 24*3 + 9, - 'X90': 16, - 'Y90': 21, - 'X180': 3, - 'Y180': 6, - 'Z180': 9, - 'CZ': 104368, - } + 'YI': 24*6 + 0, 'YX': 24*6 + 3, + 'YY': 24*6 + 6, 'YZ': 24*6 + 9, + + 'ZI': 24*9 + 0, 'ZX': 24*9 + 3, + 'ZY': 24*9 + 6, 'ZZ': 24*9 + 9, + + # single qubit gates (hack) when using TwoQubitClifford + 'CZ': 104368 # 100000 + 576 + 14*24 + 2*1728 +} class Clifford(object): @@ -180,6 +137,73 @@ def _get_clifford_id(cls, pauli_transfer_matrix): return idx +""" +This class contains Clifford decompositions for the two qubit Clifford group. + +The Clifford decomposition closely follows two papers: +Corcoles et al. Process verification ... Phys. Rev. A. 2013 + http://journals.aps.org/pra/pdf/10.1103/PhysRevA.87.030301 +for the different classes of two-qubit Cliffords. + +and +Barends et al. Superconducting quantum circuits at the ... Nature 2014 + https://www.nature.com/articles/nature13171?lang=en +for writing the cliffords in terms of CZ gates. + + +########################################################################### +2-qubit clifford decompositions + +The two qubit clifford group (C2) consists of 11520 two-qubit cliffords +These gates can be subdivided into four classes. + 1. The Single-qubit like class | 576 elements (24^2) + 2. The CNOT-like class | 5184 elements (24^2 * 3^2) + 3. The iSWAP-like class | 5184 elements (24^2 * 3^2) + 4. 
The SWAP-like class | 576 elements (24^2) + --------------------------------|------------- + + Two-qubit Clifford group C2 | 11520 elements + + +1. The Single-qubit like class + -- C1 -- + -- C1 -- + +2. The CNOT-like class + --C1--•--S1-- --C1--•--S1------ + | -> | + --C1--⊕--S1-- --C1--•--S1^Y90-- + +3. The iSWAP-like class + --C1--*--S1-- --C1--•---Y90--•--S1^Y90-- + | -> | | + --C1--*--S1-- --C1--•--mY90--•--S1^X90-- + +4. The SWAP-like class + --C1--x-- --C1--•-mY90--•--Y90--•------- + | -> | | | + --C1--x-- --C1--•--Y90--•-mY90--•--Y90-- + +C1: element of the single qubit Clifford group + N.B. we use the decomposition defined in Epstein et al. here + +S1: element of the S1 group, a subgroup of the single qubit Clifford group + +S1[0] = I +S1[1] = rY90, rX90 +S1[2] = rXm90, rYm90 + +Important clifford indices: + + I : Cl 0 + X90 : Cl 16 + Y90 : Cl 21 + X180 : Cl 3 + Y180 : Cl 6 + Z180 : Cl 9 + CZ : 4368 + +""" + class TwoQubitClifford(Clifford): # class constants GRP_SIZE_CLIFFORD = SingleQubitClifford.GRP_SIZE @@ -331,7 +355,7 @@ def CNOT_like_gates(cls, idx): C1_q0 = [(g, 'q0') for g in gate_decomposition[idx_0]] C1_q1 = [(g, 'q1') for g in gate_decomposition[idx_1]] - CZ = [('CZ', ['q0', 'q1'])] + CZ = [('CZ', ['q0', 'q1'])] # FIXME: shadows 'CZ' from outer scope, more occurrences below idx_2s = SingleQubitClifford._get_clifford_id(S1[idx_2]) S1_q0 = [(g, 'q0') for g in gate_decomposition[idx_2s]] @@ -469,6 +493,8 @@ def SWAP_like_gates(cls, idx): # It is important that this check is after the Clifford objects as otherwise # it is impossible to generate the hash tables ############################################################################## + +# FIXME: handle all hash table handling to single file/class try: open(join(hash_dir, 'single_qubit_hash_lut.txt'), 'r') # FIXME: also check 'two_qubit_hash_lut.txt' diff --git a/pycqed/simulations/pauli_transfer_matrices.py b/pycqed/simulations/pauli_transfer_matrices.py index 9a06709f30..d50c607b6c 100644 --- 
a/pycqed/simulations/pauli_transfer_matrices.py +++ b/pycqed/simulations/pauli_transfer_matrices.py @@ -1,4 +1,6 @@ import numpy as np +from deprecated import deprecated + """ This file contains pauli transfer matrices for all basic qubit operations. """ @@ -106,6 +108,7 @@ def Z_theta(theta:float, unit='deg'): # ############################################################################## +@deprecated(version='0.4', reason='not used within pycqed (except tests)') def process_fidelity(ptm_0, ptm_1, d: int=None): """ Calculates the average process fidelity between two pauli transfer matrices @@ -122,6 +125,7 @@ def process_fidelity(ptm_0, ptm_1, d: int=None): return np.dot(ptm_0.T, ptm_1).trace()/(d**2) +@deprecated(version='0.4', reason='not used within pycqed (except tests)') def average_gate_fidelity(ptm_0, ptm_1, d: int=None): """ @@ -141,6 +145,7 @@ def average_gate_fidelity(ptm_0, ptm_1, d: int=None): F_avg_gate = process_fid_to_avg_gate_fid(F_pro, d) return F_avg_gate +@deprecated(version='0.4', reason='not used within pycqed (except tests)') def process_fid_to_avg_gate_fid(F_pro: float, d:int): """ Converts diff --git a/pycqed/tests/dev_qubit_objs/test_device_objects.py b/pycqed/tests/dev_qubit_objs/test_device_objects.py index 5d3fcd6b12..53aaa99c15 100644 --- a/pycqed/tests/dev_qubit_objs/test_device_objects.py +++ b/pycqed/tests/dev_qubit_objs/test_device_objects.py @@ -4,6 +4,7 @@ import numpy as np import os import pathlib +import logging import pycqed as pq @@ -38,6 +39,7 @@ output_path = pathlib.Path(this_path) / 'test_output_cc' platf_cfg_path = output_path / 'config_cc_s17_direct_iq_openql_0_10.json' +log = logging.getLogger(__name__) class Test_Device_obj(unittest.TestCase): # FIXME: using setUpClass is more efficient, but failing tests tend to influence each other, making debugging difficult @@ -48,9 +50,20 @@ def setUp(cls): """ This sets up a mock setup using a CC to control multiple qubits """ + + log.info("starting setUp") # generate OpenQL 
configuration gen.generate_config_modular(platf_cfg_path) + # close all instruments, since a failing test may not have called tearDown, also see: + # https://github.com/QCoDeS/Qcodes/issues/528 + log.info("closing all instruments before we start") + try: + Instrument.close_all() + except Exception as e: + print(f"Caught exception during tearDown: {str(e)}") + log.info("done closing all instruments") + cls.station = station.Station() @@ -177,7 +190,7 @@ def setUp(cls): # q.mw_vsm_delay(15) q.mw_mixer_offs_GI(0.1) q.mw_mixer_offs_GQ(0.2) - q.mw_mixer_offs_DI(0.3) + q.mw_mixer_offs_DI(0.3) # FIXME q.mw_mixer_offs_DQ(0.4) # Set up the device object and set required params @@ -230,12 +243,18 @@ def setUp(cls): } cls.device.dio_map(cls.dio_map_CC) + log.info("setUp finished") # FIXME # @classmethod # def tearDownClass(cls): def tearDown(self): - Instrument.close_all() + log.info("starting tearDown") + try: + Instrument.close_all() + except Exception as e: + print(f"Caught exception during tearDown: {str(e)}") + log.info("tearDown finished") ############################################## # HAL_Shim_MQ @@ -678,8 +697,70 @@ def test_base_lutman_make(self): # FIXME: split into separate test class, like in test_qubit_objects.py ############################################## - def test_measure_two_qubit_randomized_benchmarking(self): - self.device.measure_two_qubit_randomized_benchmarking(qubits=["q8", "q10"]) + def test_measure_two_qubit_randomized_benchmarking_sequential(self): + self.device.measure_two_qubit_randomized_benchmarking( + qubits=["q8", "q10"], + nr_seeds=10 + ) + + # # FIXME: add other parallel variants once they work + def test_measure_two_qubit_randomized_benchmarking_parallel(self): + log.info("starting test_measure_two_qubit_randomized_benchmarking_parallel") + with self.assertRaises(NotImplementedError): # FIXME: for now + self.device.measure_two_qubit_randomized_benchmarking( + qubits=["q8", "q10"], + nr_seeds=10, + parallel=True + ) + 
log.info("test_measure_two_qubit_randomized_benchmarking_parallel finished") + + + # FIXME: add: measure_interleaved_randomized_benchmarking_statistics + + # FIXME: fails: + # pycqed/tests/dev_qubit_objs/test_device_objects.py:699: + # _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + # pycqed/instrument_drivers/meta_instrument/HAL_Device.py:3145: in measure_two_qubit_interleaved_randomized_benchmarking + # sim_cz_qubits=sim_cz_qubits, + # pycqed/instrument_drivers/meta_instrument/HAL_Device.py:2751: in measure_two_qubit_randomized_benchmarking + # self.prepare_for_timedomain(qubits=qubits) + # pycqed/instrument_drivers/meta_instrument/HAL/HAL_ShimMQ.py:227: in prepare_for_timedomain + # self.prepare_readout(qubits=qubits, reduced=reduced) + # pycqed/instrument_drivers/meta_instrument/HAL/HAL_ShimMQ.py:182: in prepare_readout + # self._prep_ro_sources(qubits=qubits) + # pycqed/instrument_drivers/meta_instrument/HAL/HAL_ShimMQ.py:671: in _prep_ro_sources + # LO = self.find_instrument(qubits[0]).instr_LO_ro.get_instr() + # _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + def test_measure_two_qubit_interleaved_randomized_benchmarking(self): + log.info("starting test_measure_two_qubit_interleaved_randomized_benchmarking") + self.device.measure_two_qubit_interleaved_randomized_benchmarking( + qubits=["q8", "q10"], + nr_seeds=10, + measure_idle_flux=False # FIXME: default of 'True' makes test fail with 'Instrument q8 has been removed' as shown above + ) + log.info("test_measure_two_qubit_interleaved_randomized_benchmarking finished") + + # FIXME: measure_two_qubit_purity_benchmarking + + # FIXME: measure_two_qubit_character_benchmarking + + # FIXME: 
measure_two_qubit_simultaneous_randomized_benchmarking + + # FIXME: measure_multi_qubit_simultaneous_randomized_benchmarking + + + def test_measure_two_qubit_simultaneous_randomized_benchmarking(self): + self.device.measure_two_qubit_simultaneous_randomized_benchmarking( + qubits=["q8", "q10"], + nr_seeds=10 + ) + + def test_measure_multi_qubit_simultaneous_randomized_benchmarking(self): + self.device.measure_multi_qubit_simultaneous_randomized_benchmarking( + qubits=["q8", "q10"], + nr_seeds=10 + ) + def test_measure_two_qubit_allxy(self): self.device.measure_two_qubit_allxy("q8", "q10", detector="int_avg") diff --git a/pycqed/tests/openql/test_cqasm.py b/pycqed/tests/openql/test_cqasm.py index ff0aeea5cc..07f2996328 100644 --- a/pycqed/tests/openql/test_cqasm.py +++ b/pycqed/tests/openql/test_cqasm.py @@ -16,7 +16,7 @@ this_path = pathlib.Path(__file__).parent -output_path = pathlib.Path(this_path) / 'test_output_cc' +output_path = this_path / 'test_output_cc' platf_cfg_path = output_path / 'config_cc_s17_direct_iq_openql_0_10.json' @@ -27,7 +27,7 @@ def setUpClass(cls): gen.generate_config_modular(platf_cfg_path) OqlProgram.output_dir = str(output_path) - if oqh.is_compatible_openql_version_cc(): # we require unreleased version not yet available for CI + if oqh.is_compatible_openql_version_cc(): def test_nested_rus_angle_0(self): ancilla1_idx = 10 ancilla2_idx = 8 diff --git a/requirements.txt b/requirements.txt index ae5dfce460..99cd14d788 100644 --- a/requirements.txt +++ b/requirements.txt @@ -18,7 +18,7 @@ pyqtgraph matplotlib autodepgraph networkx -qutechopenql>=0.10.3 +qutechopenql>=0.10.4 spirack zhinst packaging @@ -26,7 +26,7 @@ deprecated adaptive>=0.10.0 scikit-optimize>=0.5.2 scikit-learn==0.23.1 # Tests started to fail on 2020-08-05 due to 0.23.2 -h5py>=2.6, <3.0 # FIXME: 3.0 breaks measurement_analysis.py +h5py>=2.6, <3.0 # FIXME: 3.0 breaks measurement_analysis.py. Install breaks on Python 3.9 IPython>=4.0 ipywidgets>=4.1 lmfit>=0.9.5