diff --git a/deprecated/pycqed/instrument_drivers/meta_instrument/heterodyne.py b/deprecated/pycqed/instrument_drivers/meta_instrument/heterodyne.py index 432de1f679..c82cfb863b 100644 --- a/deprecated/pycqed/instrument_drivers/meta_instrument/heterodyne.py +++ b/deprecated/pycqed/instrument_drivers/meta_instrument/heterodyne.py @@ -157,10 +157,10 @@ def prepare_UHFQC(self): # Upload the correct integration weigths if self.single_sideband_demod(): self._acquisition_instr.prepare_SSB_weight_and_rotation( - IF=self.f_RO_mod(), weight_function_I=0, weight_function_Q=1) + IF=self.f_RO_mod(), weight_chI=0, weight_chQ=1) else: self._acquisition_instr.prepare_DSB_weight_and_rotation( - IF=self.f_RO_mod(), weight_function_I=0, weight_function_Q=1) + IF=self.f_RO_mod(), weight_chI=0, weight_chQ=1) if self._UHFQC_awg_parameters_changed and self.auto_seq_loading(): # self._acquisition_instr.awg_sequence_acquisition() # Dirty hack to get UHFLI to cooperate @@ -553,10 +553,10 @@ def prepare_UHFQC(self): # prepare weights and rotation if self.single_sideband_demod(): self._acquisition_instr.prepare_SSB_weight_and_rotation( - IF=self.f_RO_mod(), weight_function_I=0, weight_function_Q=1) + IF=self.f_RO_mod(), weight_chI=0, weight_chQ=1) else: self._acquisition_instr.prepare_DSB_weight_and_rotation( - IF=self.f_RO_mod(), weight_function_I=0, weight_function_Q=1) + IF=self.f_RO_mod(), weight_chI=0, weight_chQ=1) # this sets the result to integration and rotation outcome self._acquisition_instr.qas_0_result_source(2) diff --git a/deprecated/pycqed/instrument_drivers/meta_instrument/qubit_objects/CC_transmon.py b/deprecated/pycqed/instrument_drivers/meta_instrument/qubit_objects/CC_transmon.py index 1fff8831c9..8277478d16 100644 --- a/deprecated/pycqed/instrument_drivers/meta_instrument/qubit_objects/CC_transmon.py +++ b/deprecated/pycqed/instrument_drivers/meta_instrument/qubit_objects/CC_transmon.py @@ -421,13 +421,13 @@ def prepare_readout(self): if self.RO_acq_weights() == 'SSB': 
UHFQC.prepare_SSB_weight_and_rotation( IF=self.f_RO_mod(), - weight_function_I=self.RO_acq_weight_function_I(), - weight_function_Q=self.RO_acq_weight_function_Q()) + weight_chI=self.RO_acq_weight_function_I(), + weight_chQ=self.RO_acq_weight_function_Q()) elif self.RO_acq_weights() == 'DSB': UHFQC.prepare_DSB_weight_and_rotation( IF=self.f_RO_mod(), - weight_function_I=self.RO_acq_weight_function_I(), - weight_function_Q=self.RO_acq_weight_function_Q()) + weight_chI=self.RO_acq_weight_function_I(), + weight_chQ=self.RO_acq_weight_function_Q()) elif self.RO_acq_weights() == 'optimal': if (self.RO_optimal_weights_I() is None or self.RO_optimal_weights_Q() is None): diff --git a/deprecated/pycqed/instrument_drivers/meta_instrument/qubit_objects/Tektronix_driven_transmon.py b/deprecated/pycqed/instrument_drivers/meta_instrument/qubit_objects/Tektronix_driven_transmon.py index 0cd9e9588b..fbf0d6dda0 100644 --- a/deprecated/pycqed/instrument_drivers/meta_instrument/qubit_objects/Tektronix_driven_transmon.py +++ b/deprecated/pycqed/instrument_drivers/meta_instrument/qubit_objects/Tektronix_driven_transmon.py @@ -850,8 +850,8 @@ def measure_ssro(self, no_fits=False, MC=MC, AWG=self.AWG.get_instr(), acquisition_instr=self._acquisition_instr, pulse_pars=self.pulse_pars, RO_pars=self.RO_pars, IF=self.f_RO_mod(), - weight_function_I=self.RO_acq_weight_function_I(), - weight_function_Q=self.RO_acq_weight_function_Q(), + weight_chI=self.RO_acq_weight_function_I(), + weight_chQ=self.RO_acq_weight_function_Q(), nr_shots=nr_shots, one_weight_function_UHFQC=one_weight_function_UHFQC, optimized_weights=optimized_weights, integration_length=self.RO_acq_integration_length(), diff --git a/pycqed/__init__.py b/pycqed/__init__.py index b6fb25fd23..b4a02f5091 100644 --- a/pycqed/__init__.py +++ b/pycqed/__init__.py @@ -2,6 +2,7 @@ from pycqed.version import __version__ import sys +import warnings if 1: # FIXME: hack should be removed module_name = "qcodes" @@ -13,7 +14,12 @@ "NB: Any 
`qcodes` submodule must also be imported after pycqed.") # We need to import this here so that any later imports of `QtPlot` from qcodes # KEEP ABOVE any QtPlot import!!! - from pycqed.measurement import qcodes_QtPlot_monkey_patching + warnings.warn(message="Attempts to apply qcodes QtPlot monkey patch.") + try: + # from qcodes import plots + from pycqed.measurement import qcodes_QtPlot_monkey_patching + except ImportError as e: + warnings.warn(message="Failed to apply qcodes QtPlot monkey patch.") # from pycqed import measurement # from pycqed import analysis diff --git a/pycqed/analysis/analysis_toolbox.py b/pycqed/analysis/analysis_toolbox.py index dacc3550dd..101a1da637 100644 --- a/pycqed/analysis/analysis_toolbox.py +++ b/pycqed/analysis/analysis_toolbox.py @@ -9,7 +9,7 @@ import colorsys as colors # FIXME: was commented out, breaks code below import qutip as qtp -import qutip.metrics as qpmetrics +import qutip.core.metrics as qpmetrics from copy import deepcopy from collections import OrderedDict as od diff --git a/pycqed/analysis/measurement_analysis.py b/pycqed/analysis/measurement_analysis.py index f2fc2d0e3f..d90dafe214 100644 --- a/pycqed/analysis/measurement_analysis.py +++ b/pycqed/analysis/measurement_analysis.py @@ -2531,8 +2531,8 @@ def run_default_analysis(self, print_fit_results=False, verbose = kw.get('verbose', False) # Get old values for qubit frequency - instr_set = self.data_file['Instrument settings'] try: + instr_set = self.data_file['Instrument settings'] if self.for_ef: self.qubit_freq_spec = \ float(instr_set[self.qb_name].attrs['f_ef_qubit']) @@ -4697,24 +4697,11 @@ def run_default_analysis(self, show=False, close_file=False, **kw): units = SI_prefix_and_scale_factor(val=max(abs(self.ax.get_xticks())), unit=self.sweep_unit[0])[1] - # Get old values - instr_set = self.data_file['Instrument settings'] - try: - if self.for_ef: - T1_old = float( - instr_set[self.qb_name].attrs['T1_ef']) * 1e6 - else: - T1_old = 
float(instr_set[self.qb_name].attrs['T1']) * 1e6 - old_vals = '\nold $T_1$ = {:.5f} '.format(T1_old) + units - except (TypeError, KeyError, ValueError): - logging.warning('qb_name is None. Old parameter values will ' - 'not be retrieved.') - old_vals = '' textstr = ('$T_1$ = {:.5f} '.format(T1_micro_sec) + units + ' $\pm$ {:.5f} '.format(T1_err_micro_sec) + - units + old_vals) + units) self.fig.text(0.5, -0.2, textstr, transform=self.ax.transAxes, fontsize=self.font_size, @@ -4778,7 +4765,8 @@ class Ramsey_Analysis(TD_Analysis): Most kw parameters for Rabi_Analysis are also used here. """ - def __init__(self, label='Ramsey', phase_sweep_only=False, **kw): + def __init__(self, label='Ramsey', phase_sweep_only=False, + **kw): kw['label'] = label kw['h5mode'] = 'r+' self.phase_sweep_only = phase_sweep_only @@ -4970,15 +4958,17 @@ def run_default_analysis(self, print_fit_results=False, close_main_figure=True, save_fig=False, **kw) verbose = kw.get('verbose', False) - # Get old values for qubit frequency - instr_set = self.data_file['Instrument settings'] try: if self.for_ef: + # Get old values for qubit frequency + instr_set = self.data_file['Instrument settings'] self.qubit_freq_spec = \ float(instr_set[self.qb_name].attrs['f_ef_qubit']) elif 'freq_qubit' in kw.keys(): self.qubit_freq_spec = kw['freq_qubit'] else: + # Get old values for qubit frequency + instr_set = self.data_file['Instrument settings'] try: self.qubit_freq_spec = \ float(instr_set[self.qb_name].attrs['f_qubit']) @@ -6335,16 +6325,8 @@ def run_default_analysis(self, print_fit_results=False, scale = SI_prefix_and_scale_factor(val=max(abs(ax.get_xticks())), unit=self.sweep_unit[0])[0] - - instr_set = self.data_file['Instrument settings'] - try: - old_RO_freq = float(instr_set[self.qb_name].attrs['f_RO']) - old_vals = '\n$f_{\mathrm{old}}$ = %.5f GHz' % ( - old_RO_freq * scale) - except (TypeError, KeyError, ValueError): - logging.warning('qb_name is None. 
Old parameter values will ' - 'not be retrieved.') - old_vals = '' + + old_vals = '' if ('hanger' in fitting_model) or ('complex' in fitting_model): if kw['custom_power_message'] is None: diff --git a/pycqed/analysis/tomography.py b/pycqed/analysis/tomography.py index 4b524a641f..348e368425 100644 --- a/pycqed/analysis/tomography.py +++ b/pycqed/analysis/tomography.py @@ -4,6 +4,7 @@ # except ImportError as e: # logging.warning('Could not import qutip, tomo code will not work') import qutip as qtp +from qutip.qip import operations import numpy as np import time import scipy @@ -38,10 +39,10 @@ class TomoAnalysis_JointRO(): # The set of single qubit rotation matrixes used in the tomography # measurement (will be assumed to be used on all qubits) rotation_matrixes = [qtp.identity(2), qtp.sigmax(), - qtp.rotation(qtp.sigmax(), np.pi / 2), - qtp.rotation(qtp.sigmay(), np.pi / 2), - qtp.rotation(qtp.sigmax(), -np.pi / 2), - qtp.rotation(qtp.sigmay(), -np.pi / 2)] + operations.gates.rotation(qtp.sigmax(), np.pi / 2), + operations.gates.rotation(qtp.sigmay(), np.pi / 2), + operations.gates.rotation(qtp.sigmax(), -np.pi / 2), + operations.gates.rotation(qtp.sigmay(), -np.pi / 2)] measurement_operator_labels = ['I', 'X', 'x', 'y', '-x', '-y'] # MAKE SURE THE LABELS CORRESPOND TO THE ROTATION MATRIXES DEFINED ABOVE @@ -1164,10 +1165,10 @@ def run_default_analysis(self, **kw): TomoAnalysis_JointRO.rotation_matrixes = [ qtp.identity(2), qtp.sigmax(), - qtp.rotation(qtp.sigmay(), np.pi / 2), - qtp.rotation(qtp.sigmay(), -np.pi / 2), - qtp.rotation(qtp.sigmax(), np.pi / 2), - qtp.rotation(qtp.sigmax(), -np.pi / 2)] + operations.gates.rotation(qtp.sigmay(), np.pi / 2), + operations.gates.rotation(qtp.sigmay(), -np.pi / 2), + operations.gates.rotation(qtp.sigmax(), np.pi / 2), + operations.gates.rotation(qtp.sigmax(), -np.pi / 2)] TomoAnalysis_JointRO.measurement_operator_labels = ['I', 'X', 'y', '-y', 'x', '-x'] TomoAnalysis_JointRO.measurement_basis = [qtp.identity(2), diff 
--git a/pycqed/analysis_v2/GBT_analysis.py b/pycqed/analysis_v2/GBT_analysis.py new file mode 100644 index 0000000000..e088931935 --- /dev/null +++ b/pycqed/analysis_v2/GBT_analysis.py @@ -0,0 +1,1339 @@ +import os +import warnings +import matplotlib.pyplot as plt +import numpy as np +import pycqed.analysis_v2.base_analysis as ba +from pycqed.analysis.analysis_toolbox import get_datafilepath_from_timestamp +import pycqed.measurement.hdf5_data as h5d +from pycqed.analysis_v2 import measurement_analysis as ma2 +from pycqed.analysis import measurement_analysis as ma +from pycqed.analysis import analysis_toolbox as a_tools + +import numpy as np +import matplotlib +from matplotlib import patches, path, pyplot as plt +from matplotlib.colors import to_rgba + +map_qubits = { + 'Z3' : [-2,-1], + 'D9' : [ 0, 2], + 'X4' : [-1, 2], + 'D8' : [-1, 1], + 'Z4' : [ 0, 1], + 'D6' : [ 1, 1], + 'D7' : [-2, 0], + 'X3' : [-1, 0], + 'D5' : [ 0, 0], + 'X2' : [ 1, 0], + 'D3' : [ 2, 0], + 'D4' : [-1,-1], + 'Z1' : [ 0,-1], + 'D2' : [ 1,-1], + 'X1' : [ 1,-2], + 'Z2' : [ 2, 1], + 'D1' : [ 0,-2] + } + +def draw_square(ax, X, l, c, alpha=1): + rect = patches.Rectangle((X[0]-l/2, X[1]-l/2), l, l, linewidth=0, facecolor=c, alpha=alpha) + ax.add_patch(rect) + +def plot_1Qmetric(values, + ax, vmax=None, vmin=None, + colorbar=True, factor=1, + metric='T1', units='$\mu s$', digit=2, + **kw): + + if vmax is None: + vmax = np.max([v*factor for v in values.values() if v != None]) + if vmin is None: + vmin = np.min([v*factor for v in values.values() if v != None]) + + cmap = matplotlib.colors.ListedColormap(plt.cm.viridis.colors[::]) + norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax) + for qubit in values.keys(): + if values[qubit] != None: + c = cmap(norm(values[qubit]*factor)) + if values[qubit]*factor > (vmax-vmin)/2 + vmin : + c_text = 'k' + else: + c_text = 'white' + string = r'{:.'+str(digit)+r'f}' + string = string.format(values[qubit]*factor) + + else: + c = 'lightgray' + c_text = 'white' 
+ string = '' + + draw_square(ax, map_qubits[qubit], l=.96, c=c) + ax.text(map_qubits[qubit][0], map_qubits[qubit][1], + string, + ha='center', va='center', size=10, color=c_text) + ax.text(map_qubits[qubit][0], map_qubits[qubit][1]+.25, + qubit, ha='center', va='center', size=6, color=c_text) + + ax.set_xlim(-2.5, 2.5) + ax.set_ylim(-2.5, 2.5) + ax.axis('off') + sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm) + if colorbar: + fig = ax.get_figure() + pos = ax.get_position() + cb = fig.colorbar(sm, label=f'{metric} ({units})') + ax.set_position([pos.x0-.2, pos.y0, pos.width, pos.height]) + +class SingleQubitGBT_analysis(ba.BaseDataAnalysis): + """ + Analysis for Chevron routine + """ + def __init__(self, + Qubits: list, + t_start: str = None, + t_stop: str = None, + label: str = '', + options_dict: dict = None, + extract_only: bool = False, + auto=True): + + super().__init__(t_start=t_start, + t_stop=t_stop, + label=label, + options_dict=options_dict, + extract_only=extract_only) + self.Qubits = Qubits + if auto: + self.run_analysis() + + def extract_data(self): + self.raw_data_dict = {} + self.get_timestamps() + self.timestamp = self.timestamps[0] + data_fp = get_datafilepath_from_timestamp(self.timestamp) + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] + # Extract last measured metrics + for q in self.Qubits: + self.raw_data_dict[q] = {} + # Get AllXY data + fp_allxy = a_tools.latest_data(contains=f"AllXY_{q}", + older_than=self.timestamp) + label_allxy = fp_allxy.split('\\')[-1] + allxy = ma.AllXY_Analysis(label=label_allxy, + extract_only=True) + self.raw_data_dict[q]['allxy'] = allxy.corr_data + self.raw_data_dict[q]['allxy_err'] = allxy.deviation_total + # Analyse RB experiment + fp_rb = a_tools.latest_data(contains=f"seeds_{q}", + older_than=self.timestamp) + label_rb = fp_rb.split('\\')[-1] + rb = ma2.RandomizedBenchmarking_SingleQubit_Analysis( + label=label_rb, + rates_I_quad_ch_idx=0, + 
cal_pnts_in_dset=np.repeat(["0", "1", "2"], 2), + extract_only=True) + N_clf = rb.proc_data_dict['ncl'] + M0 = rb.proc_data_dict['M0'] + X1 = rb.proc_data_dict['X1'] + _err_key = [k for k in rb.proc_data_dict['quantities_of_interest'].keys()\ + if 'eps_g' in k ][0] + _L1_key = [k for k in rb.proc_data_dict['quantities_of_interest'].keys()\ + if 'L1' in k ][0] + SQG_err = rb.proc_data_dict['quantities_of_interest'][_err_key] + L1_err = rb.proc_data_dict['quantities_of_interest'][_L1_key] + self.raw_data_dict[q]['N_clf'] = N_clf + self.raw_data_dict[q]['M0'] = list(M0.values())[0] + self.raw_data_dict[q]['X1'] = list(X1.values())[0] + self.raw_data_dict[q]['SQG_err'] = SQG_err.nominal_value + self.raw_data_dict[q]['L1_err'] = L1_err.nominal_value + # Analyse SSRO experiment + fp_ssro = a_tools.latest_data(contains=f"SSRO_f_{q}", + older_than=self.timestamp) + label_ssro = fp_ssro.split('\\')[-1] + ssro = ma2.ra.Singleshot_Readout_Analysis( + label=label_ssro, + qubit=q, + qubit_freq=6e9, + heralded_init=True, + f_state=True, + extract_only=True) + self.raw_data_dict[q]['RO_err'] = 1-ssro.qoi['F_a'] + self.raw_data_dict[q]['RO_n0'] = ssro.proc_data_dict['h0'] + self.raw_data_dict[q]['RO_n1'] = ssro.proc_data_dict['h1'] + # Analyse T1 + fp_t1 = a_tools.latest_data(contains=f"T1_{q}", + older_than=self.timestamp) + label_t1 = fp_t1.split('\\')[-1] + t1 = ma.T1_Analysis(label = label_t1, + auto=True, close_fig=True, extract_only=True) + self.raw_data_dict[q]['t_T1'] = t1.sweep_points[:-4] + self.raw_data_dict[q]['p_T1'] = t1.normalized_data_points + self.raw_data_dict[q]['T1'] = t1.T1 + # Analyse T2 echo + fp_t2 = a_tools.latest_data(contains=f"echo_{q}", + older_than=self.timestamp) + label_t2 = fp_t2.split('\\')[-1] + t2 = ma.Echo_analysis_V15(label=label_t2, + auto=True, close_fig=True, extract_only=True) + self.raw_data_dict[q]['t_T2'] = t2.sweep_points[:-4] + self.raw_data_dict[q]['p_T2'] = t2.normalized_data_points + self.raw_data_dict[q]['T2'] = 
t2.fit_res.params['tau'].value + # Bundle all errors in array so it gets saved in hdf5 + self.raw_data_dict[q]['Errors'] = np.array([ + self.raw_data_dict[q]['SQG_err'], + self.raw_data_dict[q]['L1_err'], + self.raw_data_dict[q]['RO_err'], + self.raw_data_dict[q]['allxy_err'], + self.raw_data_dict[q]['T1'], + self.raw_data_dict[q]['T2']]) + + self.proc_data_dict = {} + self.proc_data_dict['quantities_of_interest'] = self.raw_data_dict + + def process_data(self): + pass + + def prepare_plots(self): + self.axs_dict = {} + fig = plt.figure(figsize=(3,3), dpi=150) + axs = {} + n_metrics = 6 # [SQG err, SQG leakage, allxy, T1, T2] + _n = 1.4*n_metrics + self.figs['Single_Qubit_performance_overview'] = fig + for q in self.Qubits: + ax = _add_singleQ_plot(q, _n, fig, axs) + self.axs_dict[f'{q}'] = ax + # Plot single qubit gate error + self.plot_dicts[f'{q}_SQG_er']={ + 'plotfn': _plot_SQG_error, + 'ax_id': f'{q}', + 'N_clf': self.raw_data_dict[q]['N_clf'], + 'M0': self.raw_data_dict[q]['M0'], + 'SQG_err': self.raw_data_dict[q]['SQG_err'], + 'row': 0, + 'n': _n, + } + # Plot single_qubit gate leakage + self.plot_dicts[f'{q}_leakage']={ + 'plotfn': _plot_SQG_leakage, + 'ax_id': f'{q}', + 'N_clf': self.raw_data_dict[q]['N_clf'], + 'X1': self.raw_data_dict[q]['X1'], + 'SQG_leak': self.raw_data_dict[q]['L1_err'], + 'row': 1, + 'n': _n, + } + # Plot single_qubit gate leakage + self.plot_dicts[f'{q}_allxy']={ + 'plotfn': _plot_allxy, + 'ax_id': f'{q}', + 'allxy': self.raw_data_dict[q]['allxy'], + 'allxy_err': self.raw_data_dict[q]['allxy_err'], + 'row': 2, + 'n': _n, + } + # Plot single_qubit gate leakage + self.plot_dicts[f'{q}_T1']={ + 'plotfn': _plot_T1, + 'ax_id': f'{q}', + 't': self.raw_data_dict[q]['t_T1'], + 'p': self.raw_data_dict[q]['p_T1'], + 'T1': self.raw_data_dict[q]['T1'], + 'row': 3, + 'n': _n, + } + # Plot single_qubit gate leakage + self.plot_dicts[f'{q}_T2']={ + 'plotfn': _plot_T2, + 'ax_id': f'{q}', + 't': self.raw_data_dict[q]['t_T2'], + 'p': 
self.raw_data_dict[q]['p_T2'], + 'T2': self.raw_data_dict[q]['T2'], + 'row': 4, + 'n': _n, + } + # Plot single_qubit gate leakage + self.plot_dicts[f'{q}_SSRO']={ + 'plotfn': _plot_SSRO, + 'ax_id': f'{q}', + 'n0': self.raw_data_dict[q]['RO_n0'], + 'n1': self.raw_data_dict[q]['RO_n1'], + 'RO_err': self.raw_data_dict[q]['RO_err'], + 'row': 5, + 'n': _n, + } + n_plts = len(self.Qubits) + fig.suptitle(f'{self.timestamp}\nSingle-qubit performance', x=0.125+n_plts/2*0.775*1.3, y=2.8, size=18) + + # Plot metric map + SQG_error = { q : self.raw_data_dict[q]['SQG_err'] \ + if q in self.Qubits else None \ + for q in map_qubits.keys() } + fig, ax = plt.subplots(figsize=(3.5,3.5), dpi=200) + self.figs['Single_qubit_RB'] = fig + self.axs_dict['Single_qubit_RB'] = ax + # Plot single qubit gate error + self.plot_dicts['Single_qubit_RB']={ + 'plotfn': plot_1Qmetric, + 'ax_id': 'Single_qubit_RB', + 'values': SQG_error, + 'metric': 'Single qubit gate error', + 'units': '%', + 'digit': 2, + 'factor': 100 + } + fig.suptitle(f'{self.timestamp}\n') + + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', False): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + tag_tstamp=self.options_dict.get('tag_tstamp', True)) + +def _add_singleQ_plot(qubit, n, fig, axs): + n_plts = len(axs.keys()) + ax = fig.add_subplot(10,10,n_plts+1) + _pos = ax.get_position() + # pos = [_pos.x0 + n_plts*_pos.width*1.3, _pos.y0, _pos.width, _pos.width/3*n] + pos = [0.125 + n_plts*0.775*1.3, 0.11, 0.775, 0.775/3*n] + ax.set_position(pos) + axs[qubit] = ax + axs[qubit].text(0, n/3+.2, f'$\\mathrm{{{qubit[0]}_{qubit[1]}}}$', + va='center', ha='left', size=40) + ax.set_xlim(0,1) + ax.set_ylim(0,n/3) + ax.axis('off') + ax.patch.set_alpha(0) + return ax + +def _plot_SQG_error( + ax, + N_clf, + M0, + SQG_err, + n, + row, + **kw): + # Assess pereformance level + 
if SQG_err < 0.0025: + _color = 'C2' + elif SQG_err < 0.005: + _color = 'goldenrod' + else: + _color = 'C3' + # Label + ax.text(.4, (n-row*1.4)/3-.05, 'Single qubit gate', ha='left', size=11.5) + ax.text(.86, (n-row*1.4)/3-.13, 'err.', ha='left', size=11.5) + ax.text(.375, (n-row*1.4)/3-.3, f'{SQG_err*100:2.1f}', ha='left', size=50) + ax.text(.375, (n-row*1.4)/3-.3, f'{SQG_err*100:2.1f}', ha='left', size=50, + color=_color, alpha=.65) # Overlay to give color + ax.text(.85, (n-row*1.4)/3-.3, '%', ha='left', size=25) + # RB decay plot + _x = N_clf/N_clf[-1] + _y = M0 + ax.plot(_x*0.3+.025, + (_y+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + ls='-', color=_color, clip_on=False) + ax.fill_between(_x*0.3+.025, + (_y+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + (0+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, color=f'{_color}', + alpha=.1, lw=0) + ax.plot(_x*0.3+.025, + [(.5+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3 for x in _x], + f'k--', lw=.5) + +def _plot_SQG_leakage( + ax, + N_clf, + X1, + SQG_leak, + n, + row, + **kw): + # Assess pereformance level + if SQG_leak < 0.0015: + _color = 'C2' + elif SQG_leak < 0.003: + _color = 'goldenrod' + else: + _color = 'C3' + # Label + ax.text(.4, (n-row*1.4)/3-.05, 'Single qubit gate', ha='left', size=11.5) + ax.text(.86, (n-row*1.4)/3-.13, 'leak.', ha='left', size=11.5) + ax.text(.375, (n-row*1.4)/3-.3, f'{abs(SQG_leak)*1e3:2.1f}', ha='left', size=50) + ax.text(.375, (n-row*1.4)/3-.3, f'{abs(SQG_leak)*1e3:2.1f}', ha='left', size=50, + color=_color, alpha=.65) # Overlay to give color + ax.text(0.985, (n-row*1.4)/3-.25, f'-', ha='left', size=15) + ax.text(.85, (n-row*1.4)/3-.3, f'$10^{{\\:\\:3}}$', ha='left', size=17) + # RB decay plot + _x = N_clf/N_clf[-1] + _y = (1-X1)*2 + ax.plot(_x*0.3+.025, + (_y+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + ls='-', color=_color, clip_on=False) + ax.fill_between(_x*0.3+.025, + (_y+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + (0+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, color=f'{_color}', alpha=.1, lw=0) + +def _plot_allxy( + ax, + allxy, + 
allxy_err, + n, + row, + **kw): + # Assess pereformance level + if allxy_err < 0.01: + _color = 'C2' + elif allxy_err < 0.02: + _color = 'goldenrod' + else: + _color = 'C3' + # Label + ax.text(.4, (n-row*1.4)/3-.05, 'Single qubit AllXY', ha='left', size=11.5) + ax.text(.86, (n-row*1.4)/3-.13, 'err.', ha='left', size=11.5) + ax.text(.375, (n-row*1.4)/3-.3, f'{abs(allxy_err)*1e2:2.1f}', ha='left', size=50) + ax.text(.375, (n-row*1.4)/3-.3, f'{abs(allxy_err)*1e2:2.1f}', ha='left', size=50, + color=_color, alpha=.65) # Overlay to give color + ax.text(.85, (n-row*1.4)/3-.3, '%', ha='left', size=25) + # AllXY plot + _x = np.arange(42)/41 + _y = allxy + _y_id = np.array([0]*10 + [.5]*24 + [1]*8) + ax.plot(_x*0.3+.025, + (_y+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + ls='-', color=_color, clip_on=False) + ax.plot(_x*0.3+.025, + (_y_id+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + ls='--', lw=.5, color='k', clip_on=False) + ax.fill_between(_x*0.3+.025, + (_y+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + (0+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, color=f'{_color}', alpha=.1, lw=0) + +def _plot_T1( + ax, + t, + p, + T1, + n, + row, + **kw): + # Assess pereformance level + if T1 > 10e-6: + _color = 'C2' + elif T1 > 5e-6: + _color = 'goldenrod' + else: + _color = 'C3' + # Label + ax.text(.4, (n-row*1.4)/3-.05, 'Relaxation time', ha='left', size=11.5) + ax.text(.8, (n-row*1.4)/3-.17, '$\\mathrm{{T_1}}$', ha='left', size=20) + ax.text(.375, (n-row*1.4)/3-.3, f'{T1*1e6:2.0f}', ha='left', size=50) + ax.text(.375, (n-row*1.4)/3-.3, f'{T1*1e6:2.0f}', ha='left', size=50, + color=_color, alpha=.65) # Overlay to give color + ax.text(.8, (n-row*1.4)/3-.3, '$\\mathrm{{\\mu s}}$', ha='left', size=25) + # T1 decay plot + _x = t/t[-1] + _y = p + ax.plot(_x*0.3+.025, + (_y+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + ls='-', color=_color, clip_on=False) + ax.fill_between(_x*0.3+.025, + (_y+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + (0+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, color=f'{_color}', + alpha=.1, lw=0) + +def _plot_T2( + ax, 
+ t, + p, + T2, + n, + row, + **kw): + # Assess pereformance level + if T2 > 15e-6: + _color = 'C2' + elif T2 > 7.5e-6: + _color = 'goldenrod' + else: + _color = 'C3' + # Label + ax.text(.4, (n-row*1.4)/3-.05, 'Echo time', ha='left', size=11.5) + ax.text(.8, (n-row*1.4)/3-.17, '$\\mathrm{{T_2}}$', ha='left', size=20) + ax.text(.375, (n-row*1.4)/3-.3, f'{T2*1e6:2.0f}', ha='left', size=50) + ax.text(.375, (n-row*1.4)/3-.3, f'{T2*1e6:2.0f}', ha='left', size=50, + color=_color, alpha=.65) # Overlay to give color + ax.text(.8, (n-row*1.4)/3-.3, '$\\mathrm{{\\mu s}}$', ha='left', size=25) + # T2 decay plot + _x = t/t[-1] + _y = 1-p + _y_env = (1+np.exp(-t/T2))/2 + ax.plot(_x*0.3+.025, + (_y+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + ls='-', color=_color, clip_on=False) + ax.plot(_x*0.3+.025, + (_y_env+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + ls='--', lw=.5, color=_color, clip_on=False) + ax.plot(_x*0.3+.025, + (1-_y_env+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + ls='--', lw=.5, color=_color, clip_on=False) + ax.fill_between(_x*0.3+.025, + (_y_env+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + (1-_y_env+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, color=f'{_color}', + alpha=.1, lw=0) + +def _plot_SSRO( + ax, + RO_err, + n0, n1, + n, + row, + **kw): + if RO_err < .015: + _color = 'C2' + elif RO_err < .025: + _color = 'goldenrod' + else: + _color = 'C3' + # Label + ax.text(.4, (n-row*1.4)/3-.05, 'Readout error', ha='left', size=11.5) + ax.text(.375, (n-row*1.4)/3-.3, f'{RO_err*100:.1f}', ha='left', size=50) + ax.text(.375, (n-row*1.4)/3-.3, f'{RO_err*100:.1f}', ha='left', size=50, + color=_color, alpha=0.65) # color overlay + ax.text(.85, (n-row*1.4)/3-.3, '%', ha='left', size=25) + # Plot + _max = max(np.max(n0), np.max(n1)) + _n0 = n0/_max + _n1 = n1/_max + _x = np.arange(len(n0))/(len(n0)-1) + ax.fill_between(_x*0.33+.02, + (_n0+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + (0+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + color=_color, alpha=.1) + ax.fill_between(_x*0.33+.02, + 
(_n1+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + (0+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + color=_color, alpha=.1) + ax.fill_between(_x*0.33+.02, + (_n0+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + (0+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + color=_color, + fc='None', + hatch='///', alpha=.3) + ax.fill_between(_x*0.33+.02, + (_n1+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + (0+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + color=_color, + fc='None', + hatch='\\\\\\', alpha=.3) + ax.plot(_x*0.33+.02, + (_n0+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, f'{_color}') + ax.plot(_x*0.33+.02, + (_n1+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, f'{_color}') + + +class RoundedPolygon(patches.PathPatch): + def __init__(self, xy, pad, **kwargs): + p = path.Path(*self.__round(xy=xy, pad=pad)) + super().__init__(path=p, **kwargs) + + def __round(self, xy, pad): + n = len(xy) + + for i in range(0, n): + + x0, x1, x2 = np.atleast_1d(xy[i - 1], xy[i], xy[(i + 1) % n]) + + d01, d12 = x1 - x0, x2 - x1 + d01, d12 = d01 / np.linalg.norm(d01), d12 / np.linalg.norm(d12) + + x00 = x0 + pad * d01 + x01 = x1 - pad * d01 + x10 = x1 + pad * d12 + x11 = x2 - pad * d12 + + if i == 0: + verts = [x00, x01, x1, x10] + else: + verts += [x01, x1, x10] + codes = [path.Path.MOVETO] + n*[path.Path.LINETO, path.Path.CURVE3, path.Path.CURVE3] + + return np.atleast_1d(verts, codes) + +def draw_tqg(x, y, ax, vertical, qubit_1, qubit_2, color, c_text, number=None, digit=2): + l = .3 + if vertical: + xy = np.array([(x+l, y+(.5-l)), + ( x, y+.5), + (x-l, y+(.5-l)), + (x-l, y-(.5-l)), + ( x, y-.5), + (x+l, y-(.5-l))]) + ax.text(x,y+.3, qubit_1, color=c_text, va='center', ha='center', size=4., rotation=0) + ax.text(x,y-.3, qubit_2, color=c_text, va='center', ha='center', size=4., rotation=0) + else: + xy = np.array([(x+(.5-l), y+l), + (x+.5, y), + ( x+(.5-l), y-l), + ( x-(.5-l), y-l), + (x-.5, y), + ( x-(.5-l), y+l)]) + ax.text(x-.3,y, qubit_1, color=c_text, va='center', ha='center', size=4., rotation=0) + ax.text(x+.3,y, qubit_2, color=c_text, va='center', 
ha='center', size=4., rotation=0) + if number: + string = r'{:.'+str(digit)+r'f}' + string = string.format(number) + ax.text(x,y, string, color=c_text, va='center', ha='center', size=6, rotation=0) + ax.add_patch(RoundedPolygon(xy=xy, pad=0.05, facecolor=color, edgecolor='white', lw=1, zorder=1.5)) + +Qubit_pairs = [ ('D7', 'Z3'), + ('Z3', 'D4'), + ('X4', 'D9'), + ('D9', 'Z4'), + ('X4', 'D8'), + ('D8', 'Z4'), + ('D8', 'X3'), + ('Z4', 'D6'), + ('Z4', 'D5'), + ('D6', 'X2'), + ('D6', 'Z2'), + ('D7', 'X3'), + ('X3', 'D5'), + ('X3', 'D4'), + ('D5', 'X2'), + ('D5', 'Z1'), + ('X2', 'D3'), + ('X2', 'D2'), + ('Z2', 'D3'), + ('D4', 'Z1'), + ('Z1', 'D2'), + ('Z1', 'D1'), + ('D2', 'X1'), + ('D1', 'X1')] + +def plot_2Qmetric(values, + ax, vmax=None, vmin=None, + colorbar=True, factor=1, + metric='T1', units='$\mu s$', digit=2, + **kw): + # Setup plot params + if vmax is None: + vmax = np.max([v for v in values.values() if v != None])*factor + if vmin is None: + vmin = np.min([v for v in values.values() if v != None])*factor + cmap = matplotlib.colors.ListedColormap(plt.cm.viridis.colors[::]) + norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax) + fig = ax.get_figure() + # Draw two-qubit patch + for pair in Qubit_pairs: + x1 , y1 = map_qubits[pair[0]] + x2 , y2 = map_qubits[pair[1]] + if not pair in values.keys(): + _pair = (pair[1], pair[0]) + if not _pair in values.keys(): + values[_pair] = None + else: + _pair = pair + if values[_pair] is None: + if ((abs(x1-x2) == 0) and (abs(y1-y2) == 1)): + x, y = np.mean([x1, x2]), np.mean([y1, y2]) + draw_tqg(x, y, ax, vertical=True, qubit_1=pair[0], qubit_2=pair[1], + number=None, color=to_rgba('gray', .5), c_text='white') + elif ((abs(x1-x2) == 1) and (abs(y1-y2) == 0)): + x, y = np.mean([x1, x2]), np.mean([y1, y2]) + draw_tqg(x, y, ax, vertical=False, qubit_1=pair[0], qubit_2=pair[1], + number=None, color=to_rgba('gray', .5), c_text='white') + else: + c = cmap(norm(values[_pair]*factor)) + if values[_pair]*factor > 
vmin+(vmax-vmin)/2 : + c_text = 'k' + else: + c_text = 'white' + if ((abs(x1-x2) == 0) and (abs(y1-y2) == 1)): + x, y = np.mean([x1, x2]), np.mean([y1, y2]) + draw_tqg(x, y, ax, vertical=True, qubit_1=pair[0], qubit_2=pair[1], + number=values[_pair]*factor, color=c, c_text=c_text, digit=digit) + elif ((abs(x1-x2) == 1) and (abs(y1-y2) == 0)): + x, y = np.mean([x1, x2]), np.mean([y1, y2]) + draw_tqg(x, y, ax, vertical=False, qubit_1=pair[0], qubit_2=pair[1], + number=values[_pair]*factor, color=c, c_text=c_text, digit=digit) + ax.set_xlim(-2.5, 2.5) + ax.set_ylim(-2.5, 2.5) + ax.axis('off') + if colorbar: + pos = ax.get_position() + sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm) + cb = fig.colorbar(sm, label=f'{metric} ({units})') + ax.set_position([pos.x0-.15, pos.y0, pos.width, pos.height]) + ax.patch.set_visible(False) + fig.patch.set_visible(False) + +class TwoQubitGBT_analysis(ba.BaseDataAnalysis): + """ + Analysis for Chevron routine + """ + def __init__(self, + Qubit_pairs: list, + t_start: str = None, + t_stop: str = None, + label: str = '', + options_dict: dict = None, + extract_only: bool = False, + auto=True): + + super().__init__(t_start=t_start, + t_stop=t_stop, + label=label, + options_dict=options_dict, + extract_only=extract_only) + self.Qubit_pairs = Qubit_pairs + if auto: + self.run_analysis() + + def extract_data(self): + self.raw_data_dict = {} + self.get_timestamps() + self.timestamp = self.timestamps[0] + data_fp = get_datafilepath_from_timestamp(self.timestamp) + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] + # Extract last measured metrics + for q0, q1 in self.Qubit_pairs: + # Analyse IRB experiment + fp_base = a_tools.latest_data(contains=f"icl[None]_{q0}_{q1}", + older_than=self.timestamp) + fp_int = a_tools.latest_data(contains=f"icl[104368]_{q0}_{q1}", + older_than=self.timestamp) + label_base = fp_base.split('\\')[-1] + label_int = fp_int.split('\\')[-1] + a = 
ma2.InterleavedRandomizedBenchmarkingAnalysis( + label_base=label_base, + label_int=label_int, + # label_base=f"icl[None]_{q0}_{q1}", + # label_int=f"icl[104368]_{q0}_{q1}", + extract_only=True) + N_clf = a.raw_data_dict['analyses']['base'].proc_data_dict['ncl'] + M_ref = a.raw_data_dict['analyses']['base'].proc_data_dict['M0']['2Q'] + M_int = a.raw_data_dict['analyses']['int'].proc_data_dict['M0']['2Q'] + X1_ref = a.raw_data_dict['analyses']['base'].proc_data_dict['X1']['2Q'] + X1_int = a.raw_data_dict['analyses']['int'].proc_data_dict['X1']['2Q'] + TQG_err = a.proc_data_dict['quantities_of_interest']['eps_CZ_simple'].nominal_value + L1_err = a.proc_data_dict['quantities_of_interest']['L1_CZ'].nominal_value + self.raw_data_dict[(q0, q1)] = {'N_clf':N_clf, 'M_ref':M_ref, 'M_int':M_int, 'TQG_err':abs(TQG_err), + 'X1_ref':X1_ref, 'X1_int':X1_int, 'L1_err':L1_err,} + + def process_data(self): + pass + + def prepare_plots(self): + self.axs_dict = {} + fig = plt.figure(figsize=(3,3), dpi=100) + axs = {} + n_metrics = 2 # [IRB, leakage] + _n = 1.4*n_metrics + self.figs['Two_Qubit_performance_overview'] = fig + for Qubits in self.Qubit_pairs: + Qubits = tuple(Qubits) + ax = _add_twoQ_plot(Qubits, _n, fig, axs) + self.axs_dict[f'{Qubits}'] = ax + # Plot two_qubit gate error + self.plot_dicts[f'{Qubits}_error']={ + 'plotfn': _plot_TQG_error, + 'ax_id': f'{Qubits}', + 'N_clf': self.raw_data_dict[Qubits]['N_clf'], + 'M_ref': self.raw_data_dict[Qubits]['M_ref'], + 'M_int': self.raw_data_dict[Qubits]['M_int'], + 'TQG_err': self.raw_data_dict[Qubits]['TQG_err'], + 'row': 0, + 'n': _n, + } + # Plot two_qubit gate leakage + self.plot_dicts[f'{Qubits}_leakage']={ + 'plotfn': _plot_TQG_leakage, + 'ax_id': f'{Qubits}', + 'N_clf': self.raw_data_dict[Qubits]['N_clf'], + 'X1_ref': self.raw_data_dict[Qubits]['X1_ref'], + 'X1_int': self.raw_data_dict[Qubits]['X1_int'], + 'TQG_leak': self.raw_data_dict[Qubits]['L1_err'], + 'row': 1, + 'n': _n, + } + n_plts = len(self.Qubit_pairs) + 
fig.suptitle(f'{self.timestamp}\nTwo-qubit gate performance', + x=0.125+n_plts/2*0.775*1.3, y=1.35, size=18) + # Plot metric map + TQG_error = { (q0, q1) : self.raw_data_dict[(q0, q1)]['TQG_err'] \ + if (q0, q1) in self.raw_data_dict.keys() else None \ + for (q0, q1) in self.Qubit_pairs } + fig, ax = plt.subplots(figsize=(4, 4), dpi=200) + self.figs['Two_qubit_RB'] = fig + self.axs_dict['Two_qubit_RB'] = ax + # Plot single qubit gate error + self.plot_dicts['Two_qubit_RB']={ + 'plotfn': plot_2Qmetric, + 'ax_id': 'Two_qubit_RB', + 'values': TQG_error, + 'metric': 'Two qubit gate error', + 'units': '%', + 'digit': 2, + 'factor': 100 + } + fig.suptitle(f'{self.timestamp}\n') + + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', False): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + tag_tstamp=self.options_dict.get('tag_tstamp', True)) + +def _add_twoQ_plot(Qubits, n, fig, axs): + n_plts = len(axs.keys()) + ax = fig.add_subplot(10,10,n_plts+1) + _pos = ax.get_position() + pos = [0.125 + n_plts*0.775*1.3, 0.11, 0.775, 0.775/3*n] + ax.set_position(pos) + axs[Qubits] = ax + axs[Qubits].text(0, n/3+.2, f'$\\mathrm{{{Qubits[0][0]}_{Qubits[0][1]}, {Qubits[1][0]}_{Qubits[1][1]}}}$', + va='center', ha='left', size=40) + ax.set_xlim(0,1) + ax.set_ylim(0,n/3) + ax.axis('off') + ax.patch.set_alpha(0) + return ax + +def _plot_TQG_error( + ax, + N_clf, + M_ref, + M_int, + TQG_err, + n, + row, + **kw): + # fig = ax.get_figure() + # ax = fig.get_axes()[-1] + # Assess pereformance level + if TQG_err < 0.02: + _color = 'C2' + elif TQG_err < 0.04: + _color = 'goldenrod' + else: + _color = 'C3' + # Label + ax.text(.4, (n-row*1.4)/3-.05, 'Two qubit gate', ha='left', size=11.5) + ax.text(.86, (n-row*1.4)/3-.13, 'err.', ha='left', size=11.5) + ax.text(.375, (n-row*1.4)/3-.3, f'{TQG_err*100:2.1f}', ha='left', size=50) + 
ax.text(.375, (n-row*1.4)/3-.3, f'{TQG_err*100:2.1f}', ha='left', size=50, + color=_color, alpha=.65) # Overlay to give color + ax.text(.85, (n-row*1.4)/3-.3, '%', ha='left', size=25) + # RB decay plot + _x = N_clf/N_clf[-1] + _yref = (M_ref) + _yint = (M_int) + ax.plot(_x*0.3+.025, + (_yref+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + ls='--', color=_color, clip_on=False) + ax.plot(_x*0.3+.025, + (_yint+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + color=_color, clip_on=False) + ax.fill_between(_x*0.3+.025, + (_yref+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + (0+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, color=f'{_color}', alpha=.1, lw=0) + ax.plot(_x*0.3+.025, + [(.25+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3 for x in _x], + f'k--', lw=.5) + +def _plot_TQG_leakage( + ax, + N_clf, + X1_ref, + X1_int, + TQG_leak, + n, + row, + **kw): + # fig = ax.get_figure() + # ax = fig.get_axes()[-1] + # Assess pereformance level + if TQG_leak < 0.005: + _color = 'C2' + elif TQG_leak < 0.01: + _color = 'goldenrod' + else: + _color = 'C3' + # Label + ax.text(.4, (n-row*1.4)/3-.05, 'Two qubit gate', ha='left', size=11.5) + ax.text(.86, (n-row*1.4)/3-.13, 'leak.', ha='left', size=11.5) + ax.text(.375, (n-row*1.4)/3-.3, f'{abs(TQG_leak)*100:2.1f}', ha='left', size=50) + ax.text(.375, (n-row*1.4)/3-.3, f'{abs(TQG_leak)*100:2.1f}', ha='left', size=50, + color=_color, alpha=.65) # Overlay to give color + ax.text(.85, (n-row*1.4)/3-.3, '%', ha='left', size=25) + # RB decay plot + _x = N_clf/N_clf[-1] + _yref = (1-X1_ref) + _yint = (1-X1_int) + ax.plot(_x*0.3+.025, + (_yref+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + ls='--', color=_color, clip_on=False) + ax.plot(_x*0.3+.025, + (_yint+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + color=_color, clip_on=False) + ax.fill_between(_x*0.3+.025, + (_yref+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, + (0+.1)*(1/3)/(1+2*.1)+(n-1-row*1.4)/3, color=f'{_color}', alpha=.1, lw=0) + + +class ParityCheckGBT_analysis(ba.BaseDataAnalysis): + """ + Analysis for Chevron routine + """ + def __init__(self, + 
Stabilizers: list, + t_start: str = None, + t_stop: str = None, + label: str = '', + options_dict: dict = None, + extract_only: bool = False, + auto=True): + + super().__init__(t_start=t_start, + t_stop=t_stop, + label=label, + options_dict=options_dict, + extract_only=extract_only) + self.Stabilizers = Stabilizers + if auto: + self.run_analysis() + + def extract_data(self): + self.raw_data_dict = {} + self.get_timestamps() + self.timestamp = self.timestamps[0] + data_fp = get_datafilepath_from_timestamp(self.timestamp) + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] + # Extract last measured metrics + for stab in self.Stabilizers: + self.raw_data_dict[stab] = {} + # Analyse parity check ramsey + fp_ramsey = ma.a_tools.latest_data(contains=f"Parity_check_ramsey_{stab}", + older_than=self.timestamp) + label_ramsey = fp_ramsey.split('\\')[-1] + Data_qubits = label_ramsey.split(f'ramsey_{stab}_')[-1] + Data_qubits = Data_qubits.split('_device')[0] + Data_qubits = Data_qubits.split('_') + control_cases = ['{:0{}b}'.format(i, len(Data_qubits))\ + for i in range(2**len(Data_qubits))] + a = ma2.tqg.Parity_check_ramsey_analysis( + label=label_ramsey, + Q_target = [stab], + Q_control = Data_qubits, + Q_spectator = [], + control_cases = control_cases, + angles = np.arange(0, 341, 20), + solve_for_phase_gate_model = False, + extract_only = True) + self.raw_data_dict[stab]['ramsey_curves'] = a.proc_data_dict['Ramsey_curves'][stab] + self.raw_data_dict[stab]['ramsey_fit'] = a.proc_data_dict['Fit_res'][stab] + self.raw_data_dict[stab]['missing_fraction'] = a.proc_data_dict['Missing_fraction'] + # Analyse parity assignment fidelity + fp_fidelity = ma.a_tools.latest_data(contains=f"Parity_check_fidelity_{stab}", + older_than=self.timestamp) + label_fidelity = fp_fidelity.split('\\')[-1] + Data_qubits = fp_fidelity.split(f'fidelity_{stab}_')[-1] + Data_qubits = Data_qubits.split('_device')[0] + Data_qubits = 
Data_qubits.split('_') + control_cases = ['{:0{}b}'.format(i, len(Data_qubits))\ + for i in range(2**len(Data_qubits))] + a = ma2.tqg.Parity_check_fidelity_analysis( + label=label_fidelity, + Q_ancilla=stab, + Q_control=Data_qubits, + control_cases=control_cases, + post_selection=False, + extract_only=True) + self.raw_data_dict[stab]['P_dist'] = a.proc_data_dict['P'] + self.raw_data_dict[stab]['P_dist_id'] = a.proc_data_dict['P_ideal'] + self.raw_data_dict[stab]['assignment_fidelity'] = a.qoi['fidelity'] + # Analyse parity check tomography + fp_tomo = ma.a_tools.latest_data(contains=f"parity_tomography_{stab}", + older_than=self.timestamp) + label_tomo = fp_tomo.split('\\')[-1] + Data_qubits = fp_tomo.split(f'tomography_{stab}_')[-1] + Data_qubits = Data_qubits.split('_sim')[0] + Data_qubits = Data_qubits.split('_') + try: + if stab == 'Z1': + exc_qubits = ['D4', 'D5'] # ['D1'] + elif stab == 'Z2': + exc_qubits = ['D3'] + elif stab == 'Z3': + exc_qubits = ['D7'] + elif stab == 'Z4': + exc_qubits = ['D5'] + elif stab == 'X1': + exc_qubits = ['D2'] + elif stab == 'X2': + exc_qubits = ['D2', 'D3'] + elif stab == 'X3': + exc_qubits = ['D8'] + elif stab == 'X4': + exc_qubits = [] + else: + raise ValueError('exception qubits must be given to analysis') + + a = ma2.pba.Weight_n_parity_tomography( + label = label_tomo, + sim_measurement=True, + n_rounds=2, + exception_qubits=exc_qubits, + post_selection=False, + extract_only=True) + self.raw_data_dict[stab]['rho_0'] = a.proc_data_dict['rho_0'] + self.raw_data_dict[stab]['rho_1'] = a.proc_data_dict['rho_1'] + self.raw_data_dict[stab]['fidelity_0'] = a.proc_data_dict['Fid_0'] + self.raw_data_dict[stab]['fidelity_1'] = a.proc_data_dict['Fid_1'] + self.raw_data_dict[stab]['M1'] = a.proc_data_dict['M1'] + self.raw_data_dict[stab]['M2'] = a.proc_data_dict['M2'] + self.raw_data_dict[stab]['repeatability'] = a.proc_data_dict['repeatability'] + except Exception as e: + warnings.warn(e) + + def process_data(self): + pass + + def 
prepare_plots(self): + self.axs_dict = {} + fig = plt.figure(figsize=(3,3), dpi=120) + axs = {} + n_metrics = 4 # [Ramsey, ass. fid., tomox2, repeatability] + _n = 1.4*n_metrics + self.figs['Parity_check_performance_overview'] = fig + for stab in self.Stabilizers: + ax = _add_PC_plot(stab, _n, fig, axs) + self.axs_dict[f'{stab}'] = ax + # Plot ancilla ramsey curves + self.plot_dicts[f'{stab}_ramsey']={ + 'plotfn': _plot_ramsey_curves, + 'ax_id': f'{stab}', + 'qubit': stab, + 'Ramsey_curves': self.raw_data_dict[stab]['ramsey_curves'], + 'Fit_res': self.raw_data_dict[stab]['ramsey_fit'], + 'Missing_fraction': self.raw_data_dict[stab]['missing_fraction'], + 'row': 0, + '_n': _n, + } + # Plot parity check assignment fidelity + self.plot_dicts[f'{stab}_fidelity']={ + 'plotfn': _plot_parity_fidelity, + 'ax_id': f'{stab}', + 'qubit': stab, + 'P_dist': self.raw_data_dict[stab]['P_dist'], + 'P_dist_ideal': self.raw_data_dict[stab]['P_dist_id'], + 'fidelity': self.raw_data_dict[stab]['assignment_fidelity'], + 'row': 1, + '_n': _n, + } + # Plot data-qubit tomography + self.plot_dicts[f'{stab}_tomography']={ + 'plotfn': _plot_tomo_fidelity, + 'ax_id': f'{stab}', + 'qubit': stab, + 'rho_0': self.raw_data_dict[stab]['rho_0'], + 'rho_1': self.raw_data_dict[stab]['rho_1'], + 'fidelity_0': self.raw_data_dict[stab]['fidelity_0'], + 'fidelity_1': self.raw_data_dict[stab]['fidelity_1'], + 'row': 2, + '_n': _n, + } + # Plot repeatability + self.plot_dicts[f'{stab}_repeatability']={ + 'plotfn': _plot_repeatability, + 'ax_id': f'{stab}', + 'qubit': stab, + 'M1' : self.raw_data_dict[stab]['M1'], + 'M2' : self.raw_data_dict[stab]['M2'], + 'repeatability' : self.raw_data_dict[stab]['repeatability'], + 'row': 4, + '_n': _n, + } + n_plts = len(self.Stabilizers) + fig.suptitle(f'{self.timestamp}\nParity check performance', x=0.125+n_plts/2*0.775*1.3, y=2, size=14) + + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) 
# make the plots + if self.options_dict.get('save_figs', False): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + tag_tstamp=self.options_dict.get('tag_tstamp', True)) + +def _add_PC_plot(stabilizer, n, fig, axs): + n_plts = len(axs.keys()) + ax = fig.add_subplot(10,10,n_plts+1) + _pos = ax.get_position() + pos = [0.125 + n_plts*0.775*1.3, 0.11, 0.775, 0.775/3*n] + ax.set_position(pos) + axs[stabilizer] = ax + axs[stabilizer].text(0, n/3+.2, f'$\\mathrm{{{stabilizer[0]}_{stabilizer[1]}}}$', + va='center', ha='left', size=40) + ax.set_xlim(0,1) + ax.set_ylim(0,n/3) + ax.axis('off') + ax.patch.set_alpha(0) + return ax + +def _plot_ramsey_curves( + ax, + qubit, + Ramsey_curves, + Fit_res, + Missing_fraction, + _n, + row = 0, + **kw): + # State conditions + mf = np.array(list(Missing_fraction.values())) + if np.all(mf < .035): + _color = 'C2' + elif np.all(mf < .07): + _color = 'goldenrod' + else: + _color = 'C3' + # definitions + def func(x, phi, A, B): + return A*(np.cos( (x+phi)/360 *2*np.pi )+1)/2 + B + # Label + ax.text(.4, (_n-row*1.4)/3, 'Missing fraction', ha='left', size=11.5) + # Ramsey plot + for state in Ramsey_curves.keys(): + # Plot ramsey data + _x = np.linspace(0.02, .35, 51) + _y = func(np.linspace(0,340,51), *Fit_res[state]) + ax.plot(_x, (_y+.1)*(1.2/3)/(1+2*.1)+(_n-1-row*1.4)/3, + marker='', ls='--', color=_color, lw=1, alpha=.5, + clip_on=False) + # Plot ramsey fits + _x = np.linspace(0.02, .35, 18) + _y = Ramsey_curves[state] + ax.plot(_x, (_y+.1)*(1.2/3)/(1+2*.1)+(_n-1-row*1.4)/3, + marker='.', ls='', markersize=1, color=_color, + clip_on=False) + # Plot Missing fraction + from matplotlib.colors import to_rgba + n = len(Missing_fraction.keys()) + for _x, q in zip(np.linspace(.41, .94, 2*n+1)[1::2], Missing_fraction.keys()): + # Qubit labels and missing fraction values + ax.text(_x, (_n-row*1.4)/3-.265, f'$\\mathrm{{{q[0]}_{q[1]}}}$', ha='center', size=9) + ax.text(_x, (_n-row*1.4)/3-.15, 
f'{Missing_fraction[q]*100:.1f}', + ha='center', size=8) + # Bar plot for missing fractions + _x = np.linspace(.41, .94, 2*n+1)[1::2] + _y = np.array(list(Missing_fraction.values()))*10 + ax.bar(_x, (_y)*(1/3)/(1+2*.1), + bottom = (.2)*(1/3)/(1+2*.1)+(_n-1-row*1.4)/3, + width = (.5-.025*n)/(n), + ec=_color, fc=to_rgba(_color,.1), + clip_on=False) + +def _plot_parity_fidelity( + ax, + qubit, + P_dist, + P_dist_ideal, + fidelity, + _n, + row = 1, + **kw): + # weight-4 stabilizer condition + if len(P_dist) == 16: + if 1-fidelity < 0.10: + _color = 'C2' + elif 1-fidelity < 0.125: + _color = 'goldenrod' + else: + _color = 'C3' + # weight-2 stabilizer condition + elif len(P_dist) == 4: + if 1-fidelity < 0.05: + _color = 'C2' + elif 1-fidelity < 0.075: + _color = 'goldenrod' + else: + _color = 'C3' + # definitions + def func(x, phi, A, B): + return A*(np.cos( (x+phi)/360 *2*np.pi )+1)/2 + B + # Label + ax.text(.4, (_n-row*1.4)/3-.05, 'Avg. parity assign.', ha='left', size=11.5) + ax.text(.86, (_n-row*1.4)/3-.13, 'error', ha='left', size=11.5) + ax.text(.375+.475, (_n-row*1.4)/3-.3, f'{(1-fidelity)*100:.3g}', ha='right', size=35) + ax.text(.375+.475, (_n-row*1.4)/3-.3, f'{(1-fidelity)*100:.3g}', ha='right', size=35, + color=_color, alpha=.65) # Overlay to give color + ax.text(.85, (_n-row*1.4)/3-.3, '%', ha='left', size=20) + # Bar plot + idx_sort = np.argsort([ s.count('1') for s in P_dist.keys() ]) + from matplotlib.colors import to_rgba + n = len(P_dist.keys()) + # Bar plot for missing fractions + _x = np.linspace(0, .35, 2*n+1)[1::2] + _y = np.array(list(P_dist.values()))[idx_sort] + ax.bar(_x, (_y)*(1/3)/(1+2*.1), + bottom = .1*(1/3)/(1+2*.1)+(_n-1-row*1.4)/3, + width = 0.016 if n == 16 else .07, + clip_on=False, + ec=None, fc=to_rgba(_color,.4)) + _y = np.array(list(P_dist_ideal.values()))[idx_sort] + ax.bar(_x, (_y)*(1/3)/(1+2*.1), + bottom = .1*(1/3)/(1+2*.1)+(_n-1-row*1.4)/3, + width = 0.016 if n == 16 else .07, + clip_on=False, + ec=_color, fc=to_rgba('k', 0), 
ls='-', lw=.5) + +def _plot_tomo_fidelity( + ax, + qubit, + rho_0, + rho_1, + fidelity_0, + fidelity_1, + _n, + row=2, + **kw): + if qubit in ['Z2', 'Z3', 'X1', 'X4']: + if 1-np.mean([fidelity_0,fidelity_1]) < .20: + _color = 'C2' + elif 1-np.mean([fidelity_0,fidelity_1]) < .25: + _color = 'goldenrod' + else: + _color = 'C3' + else: + if 1-np.mean([fidelity_0,fidelity_1]) < .30: + _color = 'C2' + elif 1-np.mean([fidelity_0,fidelity_1]) < .40: + _color = 'goldenrod' + else: + _color = 'C3' + # Label + ax.text(.4, (_n-row*1.4)/3-.05, 'GHZ state infidelity', ha='left', size=11.5) + ax.text(.55, (_n-row*1.4)/3-.15, 'ancilla state $|0\\rangle$', ha='left', size=10) + ax.text(.375+.15, (_n-row*1.4)/3-.4, f'{(1-fidelity_0)*100:.2g}', ha='left', size=40) + ax.text(.375+.15, (_n-row*1.4)/3-.4, f'{(1-fidelity_0)*100:.2g}', ha='left', size=40, + color=_color, alpha=.65) + ax.text(.85, (_n-row*1.4)/3-.4, '%', ha='left', size=20) + ax.text(.55, (_n-row*1.4)/3-.15-.375, 'ancilla state $|1\\rangle$', ha='left', size=10) + ax.text(.375+.15, (_n-row*1.4)/3-.4-.375, f'{(1-fidelity_1)*100:.2g}', ha='left', size=40) + ax.text(.375+.15, (_n-row*1.4)/3-.4-.375, f'{(1-fidelity_1)*100:.2g}', ha='left', size=40, + color=_color, alpha=.65) + ax.text(.85, (_n-row*1.4)/3-.4-.375, '%', ha='left', size=20) + # Ideal density matrices + rho_id_0 = np.zeros(rho_0.shape) + rho_id_0[ 0, 0] = .5 + rho_id_0[ 0,-1] = .5 + rho_id_0[-1, 0] = .5 + rho_id_0[-1,-1] = .5 + rho_id_1 = np.zeros(rho_1.shape) + rho_id_1[ 0, 0] = .5 + rho_id_1[ 0,-1] = -.5 + rho_id_1[-1, 0] = -.5 + rho_id_1[-1,-1] = .5 + for i, rho, rho_id in zip([0,1], [rho_0, rho_1], [rho_id_0, rho_id_1]): + # Tomography axis + fig = ax.get_figure() + pos = ax.get_position() + axt = fig.add_subplot(111, projection='3d', azim=-35, elev=30) + _pos = axt.get_position() + axt.set_position([pos.x0, _pos.y0-(i-1)*.325+.075, _pos.width/2, _pos.height/2]) + # turn background transparent + axt.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) + 
axt.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) + axt.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) + axt.patch.set_alpha(0) + # make the grid lines transparent + axt.xaxis._axinfo["grid"]['color'] = (1,1,1,0) + axt.yaxis._axinfo["grid"]['color'] = (1,1,1,0) + axt.zaxis._axinfo["grid"]['color'] = (1,1,1,0) + # Plot density matrix + n = len(rho) + xedges = np.linspace(0, 1, n+1) + yedges = np.linspace(0, 1, n+1) + xpos, ypos = np.meshgrid(xedges[:-1], yedges[:-1], indexing="ij") + xpos = xpos.ravel() + ypos = ypos.ravel() + zpos = 0 + dx = dy = 1/n*0.8 + dz = np.abs(rho).ravel() + import matplotlib + from matplotlib.colors import to_rgba + cmap = matplotlib.colors.LinearSegmentedColormap.from_list("", ["C3",'darkseagreen',"C0",'antiquewhite',"C3"]) + norm = matplotlib.colors.Normalize(vmin=-np.pi, vmax=np.pi) + color=cmap(norm([np.angle(e) for e in rho.ravel()])) + color_id=cmap(norm([np.angle(e) for e in rho_id.ravel()])) + dz1 = np.abs(rho_id).ravel() + axt.bar3d(xpos, ypos, zpos, dx, dy, dz, zsort='max', + color=color, alpha=1, edgecolor='black', linewidth=.1) + # selector + s = [k for k in range(len(dz1)) if dz1[k] > .15] + colors = [ to_rgba(color_id[k], .25) if dz1[k] > dz[k] else to_rgba(color_id[k], 1) for k in s ] + Z = [ dz[k] if dz1[k] > dz[k] else dz1[k] for k in s ] + DZ= [ dz1[k]-dz[k] if dz1[k] > dz[k] else dz[k]-dz1[k] for k in s ] + axt.bar3d(xpos[s], ypos[s], Z, dx, dy, dz=DZ, zsort='min', + color=colors, edgecolor=to_rgba('black', .25), linewidth=.4) + axt.set_xticklabels([]) + axt.set_yticklabels([]) + axt.set_zticklabels([]) + +def _plot_repeatability( + ax, + qubit, + M1, + M2, + repeatability, + _n, + state='good', + row = 4, + **kw): + if qubit in ['Z2', 'Z3', 'X1', 'X4']: + if 1-repeatability < .10: + _color = 'C2' + elif 1-repeatability < .15: + _color = 'goldenrod' + else: + _color = 'C3' + else: + if 1-repeatability < .20: + _color = 'C2' + elif 1-repeatability < .25: + _color = 'goldenrod' + else: + _color = 'C3' + # definitions + def 
func(x, phi, A, B): + return A*(np.cos( (x+phi)/360 *2*np.pi )+1)/2 + B + # Label + ax.text(.4, (_n-row*1.4)/3-.05, 'Repeatability err.', ha='left', size=11.5) + ax.text(.375+.5, (_n-row*1.4)/3-.275, f'{(1-repeatability)*100:.1f}', ha='right', size=35) + ax.text(.375+.5, (_n-row*1.4)/3-.275, f'{(1-repeatability)*100:.1f}', ha='right', size=35, + color=_color, alpha=.65) + ax.text(.875, (_n-row*1.4)/3-.275, '%', ha='left', size=20) + # Bar plot for missing fractions + from matplotlib.colors import to_rgba + _x = np.linspace(0, .35, 5)[1::2] + _y = (np.array([M1, M2])+1)/2 + ax.bar(_x, (_y+.2-.7)*(1/3)/(1+2*.1), + bottom = (.7)*(1/3)/(1+2*.1)+(_n-1-row*1.4)/3, + width = .15, + ec=None, fc=to_rgba(_color,.5), + clip_on=False) + + _y = (np.array([0, 1])+1)/2 + ax.bar(_x, (_y+.2-.7)*(1/3)/(1+2*.1), + bottom = (.7)*(1/3)/(1+2*.1)+(_n-1-row*1.4)/3, + width = .15, + ec=_color, fc=to_rgba(_color,0), + clip_on=False) diff --git a/pycqed/analysis_v2/LRU_analysis.py b/pycqed/analysis_v2/LRU_analysis.py new file mode 100644 index 0000000000..6d5af0c025 --- /dev/null +++ b/pycqed/analysis_v2/LRU_analysis.py @@ -0,0 +1,2908 @@ +""" +File containing analysis for LRU-related experiments. 
+""" +import os +import pycqed.analysis_v2.base_analysis as ba +import matplotlib.pyplot as plt +import numpy as np +from pycqed.analysis.analysis_toolbox import get_datafilepath_from_timestamp +import pycqed.measurement.hdf5_data as h5d +import matplotlib.patches as patches +from mpl_toolkits.mplot3d import axes3d +import matplotlib.colors +import itertools + + +def rotate_and_center_data(I, Q, vec0, vec1, phi=0): + vector = vec1-vec0 + angle = np.arctan(vector[1]/vector[0]) + rot_matrix = np.array([[ np.cos(-angle+phi),-np.sin(-angle+phi)], + [ np.sin(-angle+phi), np.cos(-angle+phi)]]) + proc = np.array((I, Q)) + proc = np.dot(rot_matrix, proc) + return proc.transpose() + +def _calculate_fid_and_threshold(x0, n0, x1, n1): + """ + Calculate fidelity and threshold from histogram data: + x0, n0 is the histogram data of shots 0 (value and occurences), + x1, n1 is the histogram data of shots 1 (value and occurences). + """ + # Build cumulative histograms of shots 0 + # and 1 in common bins by interpolation. + all_x = np.unique(np.sort(np.concatenate((x0, x1)))) + cumsum0, cumsum1 = np.cumsum(n0), np.cumsum(n1) + ecumsum0 = np.interp(x=all_x, xp=x0, fp=cumsum0, left=0) + necumsum0 = ecumsum0/np.max(ecumsum0) + ecumsum1 = np.interp(x=all_x, xp=x1, fp=cumsum1, left=0) + necumsum1 = ecumsum1/np.max(ecumsum1) + # Calculate optimal threshold and fidelity + F_vs_th = (1-(1-abs(necumsum0 - necumsum1))/2) + opt_idxs = np.argwhere(F_vs_th == np.amax(F_vs_th)) + opt_idx = int(round(np.average(opt_idxs))) + F_assignment_raw = F_vs_th[opt_idx] + threshold_raw = all_x[opt_idx] + return F_assignment_raw, threshold_raw + +def _fit_double_gauss(x_vals, hist_0, hist_1, + _x0_guess=None, _x1_guess=None): + ''' + Fit two histograms to a double gaussian with + common parameters. From fitted parameters, + calculate SNR, Pe0, Pg1, Teff, Ffit and Fdiscr. 
+ ''' + from scipy.optimize import curve_fit + # Double gaussian model for fitting + def _gauss_pdf(x, x0, sigma): + return np.exp(-((x-x0)/sigma)**2/2) + global double_gauss + def double_gauss(x, x0, x1, sigma0, sigma1, A, r): + _dist0 = A*( (1-r)*_gauss_pdf(x, x0, sigma0) + r*_gauss_pdf(x, x1, sigma1) ) + return _dist0 + # helper function to simultaneously fit both histograms with common parameters + def _double_gauss_joint(x, x0, x1, sigma0, sigma1, A0, A1, r0, r1): + _dist0 = double_gauss(x, x0, x1, sigma0, sigma1, A0, r0) + _dist1 = double_gauss(x, x1, x0, sigma1, sigma0, A1, r1) + return np.concatenate((_dist0, _dist1)) + # Guess for fit + pdf_0 = hist_0/np.sum(hist_0) # Get prob. distribution + pdf_1 = hist_1/np.sum(hist_1) # + if _x0_guess == None: + _x0_guess = np.sum(x_vals*pdf_0) # calculate mean + if _x1_guess == None: + _x1_guess = np.sum(x_vals*pdf_1) # + _sigma0_guess = np.sqrt(np.sum((x_vals-_x0_guess)**2*pdf_0)) # calculate std + _sigma1_guess = np.sqrt(np.sum((x_vals-_x1_guess)**2*pdf_1)) # + _r0_guess = 0.01 + _r1_guess = 0.05 + _A0_guess = np.max(hist_0) + _A1_guess = np.max(hist_1) + p0 = [_x0_guess, _x1_guess, _sigma0_guess, _sigma1_guess, _A0_guess, _A1_guess, _r0_guess, _r1_guess] + # Bounding parameters + _x0_bound = (-np.inf,np.inf) + _x1_bound = (-np.inf,np.inf) + _sigma0_bound = (0,np.inf) + _sigma1_bound = (0,np.inf) + _r0_bound = (0,1) + _r1_bound = (0,1) + _A0_bound = (0,np.inf) + _A1_bound = (0,np.inf) + bounds = np.array([_x0_bound, _x1_bound, _sigma0_bound, _sigma1_bound, _A0_bound, _A1_bound, _r0_bound, _r1_bound]) + # Fit parameters within bounds + popt, pcov = curve_fit( + _double_gauss_joint, x_vals, + np.concatenate((hist_0, hist_1)), + p0=p0, bounds=bounds.transpose()) + popt0 = popt[[0,1,2,3,4,6]] + popt1 = popt[[1,0,3,2,5,7]] + # Calculate quantities of interest + SNR = abs(popt0[0] - popt1[0])/((abs(popt0[2])+abs(popt1[2]))/2) + P_e0 = popt0[5]*popt0[2]/(popt0[2]*popt0[5] + popt0[3]*(1-popt0[5])) + P_g1 = 
popt1[5]*popt1[2]/(popt1[2]*popt1[5] + popt1[3]*(1-popt1[5])) + # Fidelity from fit + _range = x_vals[0], x_vals[-1] + _x_data = np.linspace(*_range, 10001) + _h0 = double_gauss(_x_data, *popt0)# compute distrubition from + _h1 = double_gauss(_x_data, *popt1)# fitted parameters. + Fid_fit, threshold_fit = _calculate_fid_and_threshold(_x_data, _h0, _x_data, _h1) + # Discrimination fidelity + _h0 = double_gauss(_x_data, *popt0[:-1], 0)# compute distrubition without residual + _h1 = double_gauss(_x_data, *popt1[:-1], 0)# excitation of relaxation. + Fid_discr, threshold_discr = _calculate_fid_and_threshold(_x_data, _h0, _x_data, _h1) + # return results + qoi = { 'SNR': SNR, + 'P_e0': P_e0, 'P_g1': P_g1, + 'Fid_fit': Fid_fit, 'Fid_discr': Fid_discr } + return popt0, popt1, qoi + +def _twoD_Gaussian(x, y, x0, y0, sigma_x, sigma_y, theta=0): + ''' + Funtion that paramatrizes a + single 2D gaussian distribution. + ''' + x0 = float(x0) + y0 = float(y0) + a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2) + b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2) + c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2) + g = np.exp( - (a*((x-x0)**2) + 2*b*(x-x0)*(y-y0) + c*((y-y0)**2))) + return g + +def triple_gauss(x, y, + x0, x1, x2, + y0, y1, y2, + sigma_x_0, sigma_x_1, sigma_x_2, + sigma_y_0, sigma_y_1, sigma_y_2, + A, r_1, r_2): + ''' + Function that paramtrizes a 2D + 3-gaussian mixture. + ''' + _dist = A*((1-r_1-r_2)*_twoD_Gaussian(x, y, x0, y0, sigma_x_0, sigma_y_0)+ + r_1*_twoD_Gaussian(x, y, x1, y1, sigma_x_1, sigma_y_1)+ + r_2*_twoD_Gaussian(x, y, x2, y2, sigma_x_2, sigma_y_2)) + return _dist + +def _triple_gauss_joint(data, + x0, x1, x2, + y0, y1, y2, + sigma_x_0, sigma_x_1, sigma_x_2, + sigma_y_0, sigma_y_1, sigma_y_2, + A0, A1, A2, + r0_1, r1_1, r2_1, + r0_2, r1_2, r2_2): + ''' + Helper function to fit 3 simultatneous mixtures + of 3 gaussians each in 2D. 
+ ''' + x, y = data + _dist0 = triple_gauss(x, y, + x0, x1, x2, + y0, y1, y2, + sigma_x_0, sigma_x_1, sigma_x_2, + sigma_y_0, sigma_y_1, sigma_y_2, + A0, r0_1, r0_2) + _dist1 = triple_gauss(x, y, + x1, x0, x2, + y1, y0, y2, + sigma_x_1, sigma_x_0, sigma_x_2, + sigma_y_1, sigma_y_0, sigma_y_2, + A1, r1_1, r1_2) + _dist2 = triple_gauss(x, y, + x2, x1, x0, + y2, y1, y0, + sigma_x_2, sigma_x_1, sigma_x_0, + sigma_y_2, sigma_y_1, sigma_y_0, + A2, r2_1, r2_2) + return np.concatenate((_dist0.ravel(), _dist1.ravel(), _dist2.ravel())) + +def _fit_triple_gauss(x_vals, y_vals, hist_0, hist_1, hist_2): + ''' + Fit three 2D histograms to a 3-gaussian mixture with + common parameters. From fitted parameters, calculate + P0i, P1i and P2i for each input state i=g,e,f. + ''' + from scipy.optimize import curve_fit + ##################### + # Guesses for fit + ##################### + # Get prob. distribution + pdf_0 = hist_0/np.sum(hist_0) + pdf_1 = hist_1/np.sum(hist_1) + pdf_2 = hist_2/np.sum(hist_2) + # calculate mean + _x0_guess, _y0_guess = np.sum(np.dot(pdf_0, x_vals)), np.sum(np.dot(pdf_0.T, y_vals)) + _x1_guess, _y1_guess = np.sum(np.dot(pdf_1, x_vals)), np.sum(np.dot(pdf_1.T, y_vals)) + _x2_guess, _y2_guess = np.sum(np.dot(pdf_2, x_vals)), np.sum(np.dot(pdf_2.T, y_vals)) + # calculate standard deviation + _sigma_x_0_guess, _sigma_y_0_guess = np.sqrt(np.sum(np.dot(pdf_0, (x_vals-_x0_guess)**2))), np.sqrt(np.sum(np.dot(pdf_0.T, (y_vals-_y0_guess)**2))) + _sigma_x_1_guess, _sigma_y_1_guess = np.sqrt(np.sum(np.dot(pdf_1, (x_vals-_x1_guess)**2))), np.sqrt(np.sum(np.dot(pdf_1.T, (y_vals-_y1_guess)**2))) + _sigma_x_2_guess, _sigma_y_2_guess = np.sqrt(np.sum(np.dot(pdf_2, (x_vals-_x2_guess)**2))), np.sqrt(np.sum(np.dot(pdf_2.T, (y_vals-_y2_guess)**2))) + # residual populations + _r0_1_guess, _r0_2_guess = 0.05, 0.05 + _r1_1_guess, _r1_2_guess = 0.05, 0.05 + _r2_1_guess, _r2_2_guess = 0.05, 0.05 + # Amplitudes + _A0_guess = np.max(hist_0) + _A1_guess = np.max(hist_1) + _A2_guess = 
np.max(hist_2) + # Guess array + p0 = [_x0_guess, _x1_guess, _x2_guess, + _y0_guess, _y1_guess, _y2_guess, + _sigma_x_0_guess, _sigma_x_1_guess, _sigma_x_2_guess, + _sigma_y_0_guess, _sigma_y_1_guess, _sigma_y_2_guess, + _A0_guess, _A1_guess, _A2_guess, + _r0_1_guess, _r1_1_guess, _r2_1_guess, + _r0_2_guess, _r1_2_guess, _r2_2_guess] + # Bounding parameters + _x0_bound, _y0_bound = (-np.inf,np.inf), (-np.inf,np.inf) + _x1_bound, _y1_bound = (-np.inf,np.inf), (-np.inf,np.inf) + _x2_bound, _y2_bound = (-np.inf,np.inf), (-np.inf,np.inf) + _sigma_x_0_bound, _sigma_y_0_bound = (0,np.inf), (0,np.inf) + _sigma_x_1_bound, _sigma_y_1_bound = (0,np.inf), (0,np.inf) + _sigma_x_2_bound, _sigma_y_2_bound = (0,np.inf), (0,np.inf) + _r0_bound = (0,1) + _r1_bound = (0,1) + _r2_bound = (0,1) + _A0_bound = (0,np.inf) + _A1_bound = (0,np.inf) + _A2_bound = (0,np.inf) + bounds = np.array([_x0_bound, _x1_bound, _x2_bound, + _y0_bound, _y1_bound, _y2_bound, + _sigma_x_0_bound, _sigma_x_1_bound, _sigma_x_2_bound, + _sigma_y_0_bound, _sigma_y_1_bound, _sigma_y_2_bound, + _A0_bound, _A1_bound, _A2_bound, + _r0_bound, _r1_bound, _r2_bound, + _r0_bound, _r1_bound, _r2_bound]) + # Create meshgrid of points + _X, _Y = np.meshgrid(x_vals, y_vals) + # Fit parameters within bounds + popt, pcov = curve_fit( + _triple_gauss_joint, (_X, _Y), + np.concatenate((hist_0.ravel(), hist_1.ravel(), hist_2.ravel())), + p0=p0, bounds=bounds.transpose()) + # x0 x1 x2 y0 y1 y2 sx0 sx1 sx2 sy0 sy1 sy2 A r1 r2 + popt0 = popt[[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15, 18]] + popt1 = popt[[ 1, 0, 2, 4, 3, 5, 7, 6, 8, 10, 9, 11, 13, 16, 19]] + popt2 = popt[[ 2, 1, 0, 5, 4, 3, 8, 7, 6, 11, 10, 9, 14, 17, 20]] + ################################## + # Calculate quantites of interest + ################################## + # ground state distribution + sx0, sx1, sx2 = popt0[6], popt1[6], popt2[6] + sy0, sy1, sy2 = popt0[9], popt1[9], popt2[9] + r1 = popt0[13] + r2 = popt0[14] + P_0g = 
((1-r1-r2)*sx0*sy0)/((1-r1-r2)*sx0*sy0 + r1*sx1*sy1 + r2*sx2*sy2) + P_1g = (r1*sx1*sy1)/((1-r1-r2)*sx0*sy0 + r1*sx1*sy1 + r2*sx2*sy2) + P_2g = (r2*sx2*sy2)/((1-r1-r2)*sx0*sy0 + r1*sx1*sy1 + r2*sx2*sy2) + # excited state distribution + r1 = popt1[13] + r2 = popt1[14] + P_0e = ((1-r1-r2)*sx0*sy0)/((1-r1-r2)*sx0*sy0 + r1*sx1*sy1 + r2*sx2*sy2) + P_1e = (r1*sx1*sy1)/((1-r1-r2)*sx0*sy0 + r1*sx1*sy1 + r2*sx2*sy2) + P_2e = (r2*sx2*sy2)/((1-r1-r2)*sx0*sy0 + r1*sx1*sy1 + r2*sx2*sy2) + # f state distribution + r1 = popt2[13] + r2 = popt2[14] + P_0f = ((1-r1-r2)*sx0*sy0)/((1-r1-r2)*sx0*sy0 + r1*sx1*sy1 + r2*sx2*sy2) + P_1f = (r1*sx1*sy1)/((1-r1-r2)*sx0*sy0 + r1*sx1*sy1 + r2*sx2*sy2) + P_2f = (r2*sx2*sy2)/((1-r1-r2)*sx0*sy0 + r1*sx1*sy1 + r2*sx2*sy2) + # qoi dictionary + qoi = {'P_0g':P_0g, 'P_1g':P_1g, 'P_2g':P_2g, + 'P_0e':P_0e, 'P_1e':P_1e, 'P_2e':P_2e, + 'P_0f':P_0f, 'P_1f':P_1f, 'P_2f':P_2f} + return popt0, popt1, popt2, qoi + +def _decision_boundary_points(coefs, intercepts): + ''' + Find points along the decision boundaries of + LinearDiscriminantAnalysis (LDA). + This is performed by finding the interception + of the bounds of LDA. For LDA, these bounds are + encoded in the coef_ and intercept_ parameters + of the classifier. + Each bound is given by the equation: + y + coef_i[0]/coef_i[1]*x + intercept_i = 0 + Note this only works for LinearDiscriminantAnalysis. + Other classifiers might have diferent bound models. + ''' + points = {} + # Cycle through model coeficients + # and intercepts. 
+ n = len(intercepts) + if n == 3: + _bounds = [[0,1], [1,2], [0,2]] + if n == 4: + _bounds = [[0,1], [1,2], [2,3], [3,0]] + for i, j in _bounds: + c_i = coefs[i] + int_i = intercepts[i] + c_j = coefs[j] + int_j = intercepts[j] + x = (- int_j/c_j[1] + int_i/c_i[1])/(-c_i[0]/c_i[1] + c_j[0]/c_j[1]) + y = -c_i[0]/c_i[1]*x - int_i/c_i[1] + points[f'{i}{j}'] = (x, y) + # Find mean point + points['mean'] = np.mean([ [x, y] for (x, y) in points.values()], axis=0) + return points + +class LRU_experiment_Analysis(ba.BaseDataAnalysis): + """ + Analysis for LRU experiment. + """ + def __init__(self, + qubit: str, + h_state: bool = False, + heralded_init: bool = False, + fit_3gauss: bool = False, + t_start: str = None, + t_stop: str = None, + label: str = '', + options_dict: dict = None, + extract_only: bool = False, + auto=True + ): + + super().__init__(t_start=t_start, t_stop=t_stop, + label=label, + options_dict=options_dict, + extract_only=extract_only) + + self.qubit = qubit + self.h_state = h_state + self.heralded_init = heralded_init + self.fit_3gauss = fit_3gauss + if auto: + self.run_analysis() + + def extract_data(self): + """ + This is a new style (sept 2019) data extraction. + This could at some point move to a higher level class. 
+ """ + self.get_timestamps() + self.timestamp = self.timestamps[0] + data_fp = get_datafilepath_from_timestamp(self.timestamp) + param_spec = {'data': ('Experimental Data/Data', 'dset'), + 'value_names': ('Experimental Data', 'attr:value_names')} + self.raw_data_dict = h5d.extract_pars_from_datafile( + data_fp, param_spec) + # Parts added to be compatible with base analysis data requirements + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] + + def process_data(self): + # Perform measurement post-selection + _cycle = 4 + if self.h_state: + _cycle += 2 + if self.heralded_init: + _cycle *= 2 + ############################################ + # Rotate shots in IQ plane + ############################################ + # Sort shots + _raw_shots = self.raw_data_dict['data'][:,1:] + if self.heralded_init: + _shots_0 = _raw_shots[1::_cycle] + _shots_1 = _raw_shots[3::_cycle] + _shots_2 = _raw_shots[5::_cycle] + _shots_lru = _raw_shots[7::_cycle] + if self.h_state: + _shots_3 = _raw_shots[9::_cycle] + _shots_hlru = _raw_shots[11::_cycle] + else: + _shots_0 = _raw_shots[0::_cycle] + _shots_1 = _raw_shots[1::_cycle] + _shots_2 = _raw_shots[2::_cycle] + _shots_lru = _raw_shots[3::_cycle] + if self.h_state: + _shots_3 = _raw_shots[4::_cycle] + _shots_hlru = _raw_shots[5::_cycle] + # Save raw shots + self.proc_data_dict['shots_0_IQ'] = _shots_0 + self.proc_data_dict['shots_1_IQ'] = _shots_1 + self.proc_data_dict['shots_2_IQ'] = _shots_2 + self.proc_data_dict['shots_lru_IQ'] = _shots_lru + if self.h_state: + self.proc_data_dict['shots_3_IQ'] = _shots_3 + self.proc_data_dict['shots_hlru_IQ'] = _shots_hlru + # Rotate data + center_0 = np.array([np.mean(_shots_0[:,0]), np.mean(_shots_0[:,1])]) + center_1 = np.array([np.mean(_shots_1[:,0]), np.mean(_shots_1[:,1])]) + raw_shots = rotate_and_center_data(_raw_shots[:,0], _raw_shots[:,1], center_0, center_1) + ##################################################### + # From this 
point onward raw shots has shape + # (nr_shots, nr_quadratures). + # Post select based on heralding measurement result. + ##################################################### + if self.heralded_init: + # estimate post-selection threshold + shots_0 = raw_shots[1::_cycle, 0] + shots_1 = raw_shots[3::_cycle, 0] + ps_th = (np.mean(shots_0)+np.mean(shots_1))/2 + # Sort heralding shots from experiment shots + ps_shots = raw_shots[0::2,0] # only I quadrature needed for postselection + exp_shots = raw_shots[1::2] # Here we want to keep both quadratures + # create post-selection mask + _mask = [ 1 if s contains post-selected + # shots of state and has shape (nr_ps_shots, nr_quadtrs). + # Next we will analyze shots projected along axis and + # therefore use a single quadrature. shots_ will be used + # to denote that array of shots. + ############################################################## + self.qoi = {} + if not self.h_state: + ############################################ + # Use classifier to assign states in the + # IQ plane and calculate qutrit fidelity. 
+ ############################################ + # Parse data for classifier + data = np.concatenate((Shots_0, Shots_1, Shots_2)) + labels = [0 for s in Shots_0]+[1 for s in Shots_1]+[2 for s in Shots_2] + from sklearn.discriminant_analysis import LinearDiscriminantAnalysis + clf = LinearDiscriminantAnalysis() + clf.fit(data, labels) + dec_bounds = _decision_boundary_points(clf.coef_, clf.intercept_) + Fid_dict = {} + for state, shots in zip([ '0', '1', '2'], + [Shots_0, Shots_1, Shots_2]): + _res = clf.predict(shots) + _fid = np.mean(_res == int(state)) + Fid_dict[state] = _fid + Fid_dict['avg'] = np.mean([f for f in Fid_dict.values()]) + # Get assignment fidelity matrix + M = np.zeros((3,3)) + for i, shots in enumerate([Shots_0, Shots_1, Shots_2]): + for j, state in enumerate(['0', '1', '2']): + _res = clf.predict(shots) + M[i][j] = np.mean(_res == int(state)) + # Get leakage removal fraction + _res = clf.predict(Shots_lru) + _vec = np.array([np.mean(_res == int('0')), + np.mean(_res == int('1')), + np.mean(_res == int('2'))]) + M_inv = np.linalg.inv(M) + pop_vec = np.dot(_vec, M_inv) + self.proc_data_dict['classifier'] = clf + self.proc_data_dict['dec_bounds'] = dec_bounds + self.proc_data_dict['Fid_dict'] = Fid_dict + self.qoi['Fid_dict'] = Fid_dict + self.qoi['Assignment_matrix'] = M + self.qoi['pop_vec'] = pop_vec + self.qoi['removal_fraction'] = 1-pop_vec[2] + else: + ############################################ + # Use classifier to assign states in the + # IQ plane and calculate ququat fidelity. 
+ ############################################ + # Parse data for classifier + data = np.concatenate((Shots_0, Shots_1, Shots_2, Shots_3)) + labels = [0 for s in Shots_0]+[1 for s in Shots_1]+\ + [2 for s in Shots_2]+[3 for s in Shots_3] + from sklearn.discriminant_analysis import LinearDiscriminantAnalysis + hclf = LinearDiscriminantAnalysis() + hclf.fit(data, labels) + dec_bounds = _decision_boundary_points(hclf.coef_, hclf.intercept_) + Fid_dict = {} + for state, shots in zip([ '0', '1', '2', '3'], + [Shots_0, Shots_1, Shots_2, Shots_3]): + _res = hclf.predict(shots) + _fid = np.mean(_res == int(state)) + Fid_dict[state] = _fid + Fid_dict['avg'] = np.mean([f for f in Fid_dict.values()]) + # Get assignment fidelity matrix + M = np.zeros((4,4)) + for i, shots in enumerate([Shots_0, Shots_1, Shots_2, Shots_3]): + for j, state in enumerate(['0', '1', '2', '3']): + _res = hclf.predict(shots) + M[i][j] = np.mean(_res == int(state)) + # Get leakage removal fraction + _res_f = hclf.predict(Shots_lru) + _vec_f = np.array([np.mean(_res_f == int('0')), + np.mean(_res_f == int('1')), + np.mean(_res_f == int('2')), + np.mean(_res_f == int('3'))]) + M_inv = np.linalg.inv(M) + pop_vec_f = np.dot(_vec_f, M_inv) + # Get leakage removal fraction + _res_h = hclf.predict(Shots_hlru) + _vec_h = np.array([np.mean(_res_h == int('0')), + np.mean(_res_h == int('1')), + np.mean(_res_h == int('2')), + np.mean(_res_h == int('3'))]) + M_inv = np.linalg.inv(M) + pop_vec_h = np.dot(_vec_h, M_inv) + self.proc_data_dict['classifier'] = hclf + self.proc_data_dict['dec_bounds'] = dec_bounds + self.proc_data_dict['Fid_dict'] = Fid_dict + self.qoi['Fid_dict'] = Fid_dict + self.qoi['Assignment_matrix'] = M + self.qoi['pop_vec'] = pop_vec_f + self.qoi['pop_vec_h'] = pop_vec_h + self.qoi['removal_fraction'] = 1-pop_vec_f[2] + self.qoi['removal_fraction_h'] = 1-pop_vec_h[3] + ######################################### + # Project data along axis perpendicular + # to the decision boundaries. 
+ ######################################### + ############################ + # Projection along 10 axis. + ############################ + # Rotate shots over 01 decision boundary axis + shots_0 = rotate_and_center_data(Shots_0[:,0],Shots_0[:,1], dec_bounds['mean'], dec_bounds['01'], phi=np.pi/2) + shots_1 = rotate_and_center_data(Shots_1[:,0],Shots_1[:,1], dec_bounds['mean'], dec_bounds['01'], phi=np.pi/2) + # Take relavant quadrature + shots_0 = shots_0[:,0] + shots_1 = shots_1[:,0] + n_shots_1 = len(shots_1) + # find range + _all_shots = np.concatenate((shots_0, shots_1)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x0, n0 = np.unique(shots_0, return_counts=True) + x1, n1 = np.unique(shots_1, return_counts=True) + Fid_01, threshold_01 = _calculate_fid_and_threshold(x0, n0, x1, n1) + # Histogram of shots for 1 and 2 + h0, bin_edges = np.histogram(shots_0, bins=100, range=_range) + h1, bin_edges = np.histogram(shots_1, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt0, popt1, params_01 = _fit_double_gauss(bin_centers, h0, h1) + # Save processed data + self.proc_data_dict['projection_01'] = {} + self.proc_data_dict['projection_01']['h0'] = h0 + self.proc_data_dict['projection_01']['h1'] = h1 + self.proc_data_dict['projection_01']['bin_centers'] = bin_centers + self.proc_data_dict['projection_01']['popt0'] = popt0 + self.proc_data_dict['projection_01']['popt1'] = popt1 + self.proc_data_dict['projection_01']['SNR'] = params_01['SNR'] + self.proc_data_dict['projection_01']['Fid'] = Fid_01 + self.proc_data_dict['projection_01']['threshold'] = threshold_01 + ############################ + # Projection along 12 axis. 
+ ############################ + # Rotate shots over 12 decision boundary axis + shots_1 = rotate_and_center_data(Shots_1[:,0],Shots_1[:,1],dec_bounds['mean'], dec_bounds['12'], phi=np.pi/2) + shots_2 = rotate_and_center_data(Shots_2[:,0],Shots_2[:,1],dec_bounds['mean'], dec_bounds['12'], phi=np.pi/2) + # Take relavant quadrature + shots_1 = shots_1[:,0] + shots_2 = shots_2[:,0] + n_shots_2 = len(shots_2) + # find range + _all_shots = np.concatenate((shots_1, shots_2)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x1, n1 = np.unique(shots_1, return_counts=True) + x2, n2 = np.unique(shots_2, return_counts=True) + Fid_12, threshold_12 = _calculate_fid_and_threshold(x1, n1, x2, n2) + # Histogram of shots for 1 and 2 + h1, bin_edges = np.histogram(shots_1, bins=100, range=_range) + h2, bin_edges = np.histogram(shots_2, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt1, popt2, params_12 = _fit_double_gauss(bin_centers, h1, h2) + # Save processed data + self.proc_data_dict['projection_12'] = {} + self.proc_data_dict['projection_12']['h1'] = h1 + self.proc_data_dict['projection_12']['h2'] = h2 + self.proc_data_dict['projection_12']['bin_centers'] = bin_centers + self.proc_data_dict['projection_12']['popt1'] = popt1 + self.proc_data_dict['projection_12']['popt2'] = popt2 + self.proc_data_dict['projection_12']['SNR'] = params_12['SNR'] + self.proc_data_dict['projection_12']['Fid'] = Fid_12 + self.proc_data_dict['projection_12']['threshold'] = threshold_12 + + if not self.h_state: + ############################ + # Projection along 02 axis. 
+ ############################ + # Rotate shots over 02 decision boundary axis + shots_0 = rotate_and_center_data(Shots_0[:,0],Shots_0[:,1],dec_bounds['mean'],dec_bounds['02'], phi=np.pi/2) + shots_2 = rotate_and_center_data(Shots_2[:,0],Shots_2[:,1],dec_bounds['mean'],dec_bounds['02'], phi=np.pi/2) + # Take relavant quadrature + shots_0 = shots_0[:,0] + shots_2 = shots_2[:,0] + n_shots_2 = len(shots_2) + # find range + _all_shots = np.concatenate((shots_0, shots_2)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x0, n0 = np.unique(shots_0, return_counts=True) + x2, n2 = np.unique(shots_2, return_counts=True) + Fid_02, threshold_02 = _calculate_fid_and_threshold(x0, n0, x2, n2) + # Histogram of shots for 1 and 2 + h0, bin_edges = np.histogram(shots_0, bins=100, range=_range) + h2, bin_edges = np.histogram(shots_2, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt0, popt2, params_02 = _fit_double_gauss(bin_centers, h0, h2) + # Save processed data + self.proc_data_dict['projection_02'] = {} + self.proc_data_dict['projection_02']['h0'] = h0 + self.proc_data_dict['projection_02']['h2'] = h2 + self.proc_data_dict['projection_02']['bin_centers'] = bin_centers + self.proc_data_dict['projection_02']['popt0'] = popt0 + self.proc_data_dict['projection_02']['popt2'] = popt2 + self.proc_data_dict['projection_02']['SNR'] = params_02['SNR'] + self.proc_data_dict['projection_02']['Fid'] = Fid_02 + self.proc_data_dict['projection_02']['threshold'] = threshold_02 + else: + ############################ + # Projection along 23 axis. 
+ ############################ + # Rotate shots over 23 decision boundary axis + shots_2 = rotate_and_center_data(Shots_2[:,0],Shots_2[:,1],dec_bounds['mean'],dec_bounds['23'], phi=np.pi/2) + shots_3 = rotate_and_center_data(Shots_3[:,0],Shots_3[:,1],dec_bounds['mean'],dec_bounds['23'], phi=np.pi/2) + # Take relavant quadrature + shots_3 = shots_3[:,0] + shots_2 = shots_2[:,0] + n_shots_2 = len(shots_2) + # find range + _all_shots = np.concatenate((shots_3, shots_2)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x3, n3 = np.unique(shots_3, return_counts=True) + x2, n2 = np.unique(shots_2, return_counts=True) + Fid_23, threshold_23 = _calculate_fid_and_threshold(x3, n3, x2, n2) + # Histogram of shots for 1 and 2 + h3, bin_edges = np.histogram(shots_3, bins=100, range=_range) + h2, bin_edges = np.histogram(shots_2, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt3, popt2, params_23 = _fit_double_gauss(bin_centers, h3, h2) + # Save processed data + self.proc_data_dict['projection_23'] = {} + self.proc_data_dict['projection_23']['h3'] = h3 + self.proc_data_dict['projection_23']['h2'] = h2 + self.proc_data_dict['projection_23']['bin_centers'] = bin_centers + self.proc_data_dict['projection_23']['popt3'] = popt3 + self.proc_data_dict['projection_23']['popt2'] = popt2 + self.proc_data_dict['projection_23']['SNR'] = params_23['SNR'] + self.proc_data_dict['projection_23']['Fid'] = Fid_23 + self.proc_data_dict['projection_23']['threshold'] = threshold_23 + ############################ + # Projection along 30 axis. 
+ ############################ + # Rotate shots over 30 decision boundary axis + shots_0 = rotate_and_center_data(Shots_0[:,0],Shots_0[:,1],dec_bounds['mean'],dec_bounds['30'], phi=np.pi/2) + shots_3 = rotate_and_center_data(Shots_3[:,0],Shots_3[:,1],dec_bounds['mean'],dec_bounds['30'], phi=np.pi/2) + # Take relavant quadrature + shots_3 = shots_3[:,0] + shots_0 = shots_0[:,0] + n_shots_3 = len(shots_3) + # find range + _all_shots = np.concatenate((shots_3, shots_0)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x3, n3 = np.unique(shots_3, return_counts=True) + x0, n0 = np.unique(shots_0, return_counts=True) + Fid_30, threshold_30 = _calculate_fid_and_threshold(x3, n3, x0, n0) + # Histogram of shots for 1 and 2 + h3, bin_edges = np.histogram(shots_3, bins=100, range=_range) + h0, bin_edges = np.histogram(shots_0, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt3, popt0, params_30 = _fit_double_gauss(bin_centers, h3, h0) + # Save processed data + self.proc_data_dict['projection_30'] = {} + self.proc_data_dict['projection_30']['h3'] = h3 + self.proc_data_dict['projection_30']['h0'] = h0 + self.proc_data_dict['projection_30']['bin_centers'] = bin_centers + self.proc_data_dict['projection_30']['popt3'] = popt3 + self.proc_data_dict['projection_30']['popt0'] = popt0 + self.proc_data_dict['projection_30']['SNR'] = params_30['SNR'] + self.proc_data_dict['projection_30']['Fid'] = Fid_30 + self.proc_data_dict['projection_30']['threshold'] = threshold_30 + + ############################### + # Fit 3-gaussian mixture + ############################### + if self.fit_3gauss: + _all_shots = np.concatenate((self.proc_data_dict['shots_0_IQ'], + self.proc_data_dict['shots_1_IQ'], + self.proc_data_dict['shots_2_IQ'])) + _lim = np.max([ np.max(np.abs(_all_shots[:,0]))*1.1, + np.max(np.abs(_all_shots[:,1]))*1.1 ]) + def _histogram_2d(x_data, y_data, lim): + _bins = (np. 
linspace(-lim, lim, 201), np.linspace(-lim, lim, 201)) + n, _xbins, _ybins = np.histogram2d(x_data, y_data, bins=_bins) + return n.T, _xbins, _ybins + # 2D histograms + n_0, xbins, ybins = _histogram_2d(*self.proc_data_dict['shots_0_IQ'].T, lim=_lim) + n_1, xbins, ybins = _histogram_2d(*self.proc_data_dict['shots_1_IQ'].T, lim=_lim) + n_2, xbins, ybins = _histogram_2d(*self.proc_data_dict['shots_2_IQ'].T, lim=_lim) + n_3, xbins, ybins = _histogram_2d(*self.proc_data_dict['shots_lru_IQ'].T, lim=_lim) + # bin centers + xbins_c, ybins_c = (xbins[1:]+xbins[:-1])/2, (ybins[1:]+ybins[:-1])/2 + popt0, popt1, popt2, qoi = _fit_triple_gauss(xbins_c, ybins_c, n_0, n_1, n_2) + popt3, _popt1, _popt2, _qoi = _fit_triple_gauss(xbins_c, ybins_c, n_3, n_1, n_2) + self.proc_data_dict['3gauss_fit'] = {} + self.proc_data_dict['3gauss_fit']['xbins'] = xbins + self.proc_data_dict['3gauss_fit']['ybins'] = ybins + self.proc_data_dict['3gauss_fit']['n0'] = n_0 + self.proc_data_dict['3gauss_fit']['n1'] = n_1 + self.proc_data_dict['3gauss_fit']['n2'] = n_2 + self.proc_data_dict['3gauss_fit']['n3'] = n_3 + self.proc_data_dict['3gauss_fit']['popt0'] = popt0 + self.proc_data_dict['3gauss_fit']['popt1'] = popt1 + self.proc_data_dict['3gauss_fit']['popt2'] = popt2 + self.proc_data_dict['3gauss_fit']['popt3'] = popt3 + self.proc_data_dict['3gauss_fit']['qoi'] = _qoi + self.proc_data_dict['3gauss_fit']['removal_fraction'] = 1-_qoi['P_2g'] + self.qoi['removal_fraction_gauss_fit'] = 1-_qoi['P_2g'] + + def prepare_plots(self): + self.axs_dict = {} + fig = plt.figure(figsize=(8,4), dpi=100) + if self.h_state: + axs = [fig.add_subplot(121), + fig.add_subplot(422), + fig.add_subplot(424), + fig.add_subplot(426), + fig.add_subplot(428)] + else: + axs = [fig.add_subplot(121), + fig.add_subplot(322), + fig.add_subplot(324), + fig.add_subplot(326)] + # fig.patch.set_alpha(0) + self.axs_dict['SSRO_plot'] = axs[0] + self.figs['SSRO_plot'] = fig + self.plot_dicts['SSRO_plot'] = { + 'plotfn': 
ssro_IQ_projection_plotfn, + 'ax_id': 'SSRO_plot', + 'shots_0': self.proc_data_dict['Shots_0'], + 'shots_1': self.proc_data_dict['Shots_1'], + 'shots_2': self.proc_data_dict['Shots_2'], + 'shots_3': self.proc_data_dict['Shots_3'] if self.h_state \ + else None, + 'projection_01': self.proc_data_dict['projection_01'], + 'projection_12': self.proc_data_dict['projection_12'], + 'projection_02': None if self.h_state else\ + self.proc_data_dict['projection_02'], + 'projection_23': None if not self.h_state else\ + self.proc_data_dict['projection_23'], + 'projection_30': None if not self.h_state else\ + self.proc_data_dict['projection_30'], + 'classifier': self.proc_data_dict['classifier'], + 'dec_bounds': self.proc_data_dict['dec_bounds'], + 'Fid_dict': self.proc_data_dict['Fid_dict'], + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + fig, ax = plt.subplots(figsize=(3.25,3.25), dpi=100) + # fig.patch.set_alpha(0) + self.axs_dict['Leakage_histogram_f_state'] = ax + self.figs['Leakage_histogram_f_state'] = fig + self.plot_dicts['Leakage_histogram_f_state'] = { + 'plotfn': leakage_hist_plotfn, + 'ax_id': 'Leakage_histogram_f_state', + 'shots_0': self.proc_data_dict['Shots_0'], + 'shots_1': self.proc_data_dict['Shots_1'], + 'shots_2': self.proc_data_dict['Shots_2'], + 'shots_3': self.proc_data_dict['Shots_3'] if self.h_state \ + else None, + 'shots_lru': self.proc_data_dict['Shots_lru'], + 'classifier': self.proc_data_dict['classifier'], + 'dec_bounds': self.proc_data_dict['dec_bounds'], + 'pop_vec': self.qoi['pop_vec'], + 'state' : '2', + 'qoi': self.qoi, + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + if self.h_state: + fig, ax = plt.subplots(figsize=(3.25,3.25), dpi=100) + # fig.patch.set_alpha(0) + self.axs_dict['Leakage_histogram_h_state'] = ax + self.figs['Leakage_histogram_h_state'] = fig + self.plot_dicts['Leakage_histogram'] = { + 'plotfn': leakage_hist_plotfn, + 'ax_id': 'Leakage_histogram_h_state', + 'shots_0': self.proc_data_dict['Shots_0'], + 
'shots_1': self.proc_data_dict['Shots_1'], + 'shots_2': self.proc_data_dict['Shots_2'], + 'shots_3': self.proc_data_dict['Shots_3'], + 'shots_lru': self.proc_data_dict['Shots_hlru'], + 'classifier': self.proc_data_dict['classifier'], + 'dec_bounds': self.proc_data_dict['dec_bounds'], + 'pop_vec': self.qoi['pop_vec_h'], + 'state' : '3', + 'qoi': self.qoi, + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + fig, ax = plt.subplots(figsize=(3,3), dpi=100) + # fig.patch.set_alpha(0) + self.axs_dict['Assignment_matrix'] = ax + self.figs['Assignment_matrix'] = fig + self.plot_dicts['Assignment_matrix'] = { + 'plotfn': assignment_matrix_plotfn, + 'ax_id': 'Assignment_matrix', + 'M': self.qoi['Assignment_matrix'], + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + if self.fit_3gauss: + fig = plt.figure(figsize=(12,4), dpi=100) + axs = [None,None,None] + axs[0] = fig.add_subplot(1, 3, 1) + axs[2] = fig.add_subplot(1, 3, 3 , projection='3d', elev=20) + axs[1] = fig.add_subplot(1, 3, 2) + # fig.patch.set_alpha(0) + self.axs_dict['Three_gauss_fit'] = axs[0] + self.figs['Three_gauss_fit'] = fig + self.plot_dicts['Three_gauss_fit'] = { + 'plotfn': gauss_fit2D_plotfn, + 'ax_id': 'Three_gauss_fit', + 'fit_dict': self.proc_data_dict['3gauss_fit'], + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', False): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + tag_tstamp=self.options_dict.get('tag_tstamp', True)) + +def ssro_IQ_plotfn( + shots_0, + shots_1, + shots_2, + shots_3, + timestamp, + qubit, + ax, + dec_bounds=None, + Fid_dict=None, + **kw): + fig = ax.get_figure() + # Fit 2D gaussians + from scipy.optimize import curve_fit + def twoD_Gaussian(data, amplitude, x0, y0, sigma_x, sigma_y, theta): + x, y = data + x0 = float(x0) + y0 = float(y0) + a 
= (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2) + b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2) + c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2) + g = amplitude*np.exp( - (a*((x-x0)**2) + 2*b*(x-x0)*(y-y0) + + c*((y-y0)**2))) + return g.ravel() + def _fit_2D_gaussian(X, Y): + counts, _x, _y = np.histogram2d(X, Y, bins=[100, 100], density=True) + x = (_x[:-1] + _x[1:]) / 2 + y = (_y[:-1] + _y[1:]) / 2 + _x, _y = np.meshgrid(_x, _y) + x, y = np.meshgrid(x, y) + p0 = [counts.max(), np.mean(X), np.mean(Y), np.std(X), np.std(Y), 0] + popt, pcov = curve_fit(twoD_Gaussian, (x, y), counts.T.ravel(), p0=p0) + return popt + popt_0 = _fit_2D_gaussian(shots_0[:,0], shots_0[:,1]) + popt_1 = _fit_2D_gaussian(shots_1[:,0], shots_1[:,1]) + # Plot stuff + ax.plot(shots_0[:,0], shots_0[:,1], '.', color='C0', alpha=0.05) + ax.plot(shots_1[:,0], shots_1[:,1], '.', color='C3', alpha=0.05) + ax.plot([0, popt_0[1]], [0, popt_0[2]], '--', color='k', lw=.5) + ax.plot([0, popt_1[1]], [0, popt_1[2]], '--', color='k', lw=.5) + ax.plot(popt_0[1], popt_0[2], '.', color='C0', label='ground') + ax.plot(popt_1[1], popt_1[2], '.', color='C3', label='excited') + ax.plot(popt_0[1], popt_0[2], 'x', color='white') + ax.plot(popt_1[1], popt_1[2], 'x', color='white') + # Draw 4sigma ellipse around mean + from matplotlib.patches import Ellipse + circle_0 = Ellipse((popt_0[1], popt_0[2]), + width=4*popt_0[3], height=4*popt_0[4], + angle=-popt_0[5]*180/np.pi, + ec='white', fc='none', ls='--', lw=1.25, zorder=10) + ax.add_patch(circle_0) + circle_1 = Ellipse((popt_1[1], popt_1[2]), + width=4*popt_1[3], height=4*popt_1[4], + angle=-popt_1[5]*180/np.pi, + ec='white', fc='none', ls='--', lw=1.25, zorder=10) + ax.add_patch(circle_1) + _all_shots = np.concatenate((shots_0, shots_1)) + if type(shots_2) != type(None): + popt_2 = _fit_2D_gaussian(shots_2[:,0], shots_2[:,1]) + ax.plot(shots_2[:,0], shots_2[:,1], '.', color='C2', alpha=0.05) 
+ ax.plot([0, popt_2[1]], [0, popt_2[2]], '--', color='k', lw=.5) + ax.plot(popt_2[1], popt_2[2], '.', color='C2', label='$2^\mathrm{nd}$ excited') + ax.plot(popt_2[1], popt_2[2], 'x', color='white') + # Draw 4sigma ellipse around mean + circle_2 = Ellipse((popt_2[1], popt_2[2]), + width=4*popt_2[3], height=4*popt_2[4], + angle=-popt_2[5]*180/np.pi, + ec='white', fc='none', ls='--', lw=1.25, zorder=10) + ax.add_patch(circle_2) + _all_shots = np.concatenate((_all_shots, shots_2)) + if type(shots_3) != type(None): + popt_3 = _fit_2D_gaussian(shots_3[:,0], shots_3[:,1]) + ax.plot(shots_3[:,0], shots_3[:,1], '.', color='gold', alpha=0.05) + ax.plot([0, popt_3[1]], [0, popt_3[2]], '--', color='k', lw=.5) + ax.plot(popt_3[1], popt_3[2], '.', color='gold', label='$3^\mathrm{rd}$ excited') + ax.plot(popt_3[1], popt_3[2], 'x', color='white') + # Draw 4sigma ellipse around mean + circle_3 = Ellipse((popt_3[1], popt_3[2]), + width=4*popt_3[3], height=4*popt_3[4], + angle=-popt_3[5]*180/np.pi, + ec='white', fc='none', ls='--', lw=1.25, zorder=10) + ax.add_patch(circle_3) + _all_shots = np.concatenate((_all_shots, shots_3)) + + _lim = np.max([ np.max(np.abs(_all_shots[:,0]))*1.1, np.max(np.abs(_all_shots[:,1]))*1.1 ]) + ax.set_xlim(-_lim, _lim) + ax.set_ylim(-_lim, _lim) + ax.legend(frameon=False) + ax.set_xlabel('Integrated voltage I') + ax.set_ylabel('Integrated voltage Q') + ax.set_title(f'{timestamp}\nIQ plot qubit {qubit}') + if dec_bounds: + # Plot decision boundary + _bounds = list(dec_bounds.keys()) + _bounds.remove('mean') + Lim_points = {} + for bound in _bounds: + dec_bounds['mean'] + _x0, _y0 = dec_bounds['mean'] + _x1, _y1 = dec_bounds[bound] + a = (_y1-_y0)/(_x1-_x0) + b = _y0 - a*_x0 + _xlim = 1e2*np.sign(_x1-_x0) + _ylim = a*_xlim + b + Lim_points[bound] = _xlim, _ylim + # Plot classifier zones + from matplotlib.patches import Polygon + # Plot 0 area + _points = [dec_bounds['mean'], Lim_points['01'], Lim_points['30']] + _patch = Polygon(_points, color='C0', 
alpha=0.2, lw=0) + ax.add_patch(_patch) + # Plot 1 area + _points = [dec_bounds['mean'], Lim_points['01'], Lim_points['12']] + _patch = Polygon(_points, color='C3', alpha=0.2, lw=0) + ax.add_patch(_patch) + # Plot 2 area + _points = [dec_bounds['mean'], Lim_points['23'], Lim_points['12']] + _patch = Polygon(_points, color='C2', alpha=0.2, lw=0) + ax.add_patch(_patch) + if type(shots_3) != type(None): + # Plot 3 area + _points = [dec_bounds['mean'], Lim_points['23'], Lim_points['30']] + _patch = Polygon(_points, color='gold', alpha=0.2, lw=0) + ax.add_patch(_patch) + for bound in _bounds: + _x0, _y0 = dec_bounds['mean'] + _x1, _y1 = Lim_points[bound] + ax.plot([_x0, _x1], [_y0, _y1], 'k--', lw=1) + if Fid_dict: + # Write fidelity textbox + text = '\n'.join(('Assignment fidelity:', + f'$F_g$ : {Fid_dict["0"]*100:.1f}%', + f'$F_e$ : {Fid_dict["1"]*100:.1f}%', + f'$F_f$ : {Fid_dict["2"]*100:.1f}%', + f'$F_h$ : {Fid_dict["3"]*100:.1f}%', + f'$F_\mathrm{"{avg}"}$ : {Fid_dict["avg"]*100:.1f}%')) + props = dict(boxstyle='round', facecolor='white', alpha=1) + ax.text(1.05, 1, text, transform=ax.transAxes, + verticalalignment='top', bbox=props) + +def ssro_IQ_projection_plotfn( + shots_0, + shots_1, + shots_2, + projection_01, + projection_12, + classifier, + dec_bounds, + Fid_dict, + timestamp, + qubit, + ax, + shots_3=None, + projection_02=None, + projection_23=None, + projection_30=None, + **kw): + fig = ax.get_figure() + axs = fig.get_axes() + # Fit 2D gaussians + from scipy.optimize import curve_fit + def twoD_Gaussian(data, amplitude, x0, y0, sigma_x, sigma_y, theta): + x, y = data + x0 = float(x0) + y0 = float(y0) + a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2) + b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2) + c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2) + g = amplitude*np.exp( - (a*((x-x0)**2) + 2*b*(x-x0)*(y-y0) + + c*((y-y0)**2))) + return g.ravel() + def _fit_2D_gaussian(X, Y): + 
counts, _x, _y = np.histogram2d(X, Y, bins=[100, 100], density=True) + x = (_x[:-1] + _x[1:]) / 2 + y = (_y[:-1] + _y[1:]) / 2 + _x, _y = np.meshgrid(_x, _y) + x, y = np.meshgrid(x, y) + p0 = [counts.max(), np.mean(X), np.mean(Y), np.std(X), np.std(Y), 0] + popt, pcov = curve_fit(twoD_Gaussian, (x, y), counts.T.ravel(), p0=p0) + return popt + popt_0 = _fit_2D_gaussian(shots_0[:,0], shots_0[:,1]) + popt_1 = _fit_2D_gaussian(shots_1[:,0], shots_1[:,1]) + popt_2 = _fit_2D_gaussian(shots_2[:,0], shots_2[:,1]) + if type(shots_3) != type(None): + popt_3 = _fit_2D_gaussian(shots_3[:,0], shots_3[:,1]) + # Plot stuff + axs[0].plot(shots_0[:,0], shots_0[:,1], '.', color='C0', alpha=0.05) + axs[0].plot(shots_1[:,0], shots_1[:,1], '.', color='C3', alpha=0.05) + axs[0].plot(shots_2[:,0], shots_2[:,1], '.', color='C2', alpha=0.05) + if type(shots_3) != type(None): + axs[0].plot(shots_3[:,0], shots_3[:,1], '.', color='gold', alpha=0.05) + axs[0].plot([0, popt_0[1]], [0, popt_0[2]], '--', color='k', lw=.5) + axs[0].plot([0, popt_1[1]], [0, popt_1[2]], '--', color='k', lw=.5) + axs[0].plot([0, popt_2[1]], [0, popt_2[2]], '--', color='k', lw=.5) + axs[0].plot(popt_0[1], popt_0[2], '.', color='C0', label='ground') + axs[0].plot(popt_1[1], popt_1[2], '.', color='C3', label='excited') + axs[0].plot(popt_2[1], popt_2[2], '.', color='C2', label='$2^\mathrm{nd}$ excited') + axs[0].plot(popt_0[1], popt_0[2], 'x', color='white') + axs[0].plot(popt_1[1], popt_1[2], 'x', color='white') + axs[0].plot(popt_2[1], popt_2[2], 'x', color='white') + # Draw 4sigma ellipse around mean + from matplotlib.patches import Ellipse + circle_0 = Ellipse((popt_0[1], popt_0[2]), + width=4*popt_0[3], height=4*popt_0[4], + angle=-popt_0[5]*180/np.pi, + ec='white', fc='none', ls='--', lw=1.25, zorder=10) + axs[0].add_patch(circle_0) + circle_1 = Ellipse((popt_1[1], popt_1[2]), + width=4*popt_1[3], height=4*popt_1[4], + angle=-popt_1[5]*180/np.pi, + ec='white', fc='none', ls='--', lw=1.25, zorder=10) + 
axs[0].add_patch(circle_1) + circle_2 = Ellipse((popt_2[1], popt_2[2]), + width=4*popt_2[3], height=4*popt_2[4], + angle=-popt_2[5]*180/np.pi, + ec='white', fc='none', ls='--', lw=1.25, zorder=10) + axs[0].add_patch(circle_2) + _all_shots = np.concatenate((shots_0, shots_1, shots_2)) + if type(shots_3) != type(None): + popt_3 = _fit_2D_gaussian(shots_3[:,0], shots_3[:,1]) + axs[0].plot([0, popt_3[1]], [0, popt_3[2]], '--', color='k', lw=.5) + axs[0].plot(popt_3[1], popt_3[2], '.', color='gold', label='$3^\mathrm{rd}$ excited') + axs[0].plot(popt_3[1], popt_3[2], 'x', color='w') + # Draw 4sigma ellipse around mean + circle_3 = Ellipse((popt_3[1], popt_3[2]), + width=4*popt_3[3], height=4*popt_3[4], + angle=-popt_3[5]*180/np.pi, + ec='white', fc='none', ls='--', lw=1.25, zorder=10) + axs[0].add_patch(circle_3) + _all_shots = np.concatenate((_all_shots, shots_3)) + # Plot classifier zones + from matplotlib.patches import Polygon + _lim = np.max([ np.max(np.abs(_all_shots[:,0]))*1.1, np.max(np.abs(_all_shots[:,1]))*1.1 ]) + axs[0].set_xlim(-_lim, _lim) + axs[0].set_ylim(-_lim, _lim) + # Plot decision boundary + _bounds = list(dec_bounds.keys()) + _bounds.remove('mean') + Lim_points = {} + for bound in _bounds: + dec_bounds['mean'] + _x0, _y0 = dec_bounds['mean'] + _x1, _y1 = dec_bounds[bound] + a = (_y1-_y0)/(_x1-_x0) + b = _y0 - a*_x0 + _xlim = 1e2*np.sign(_x1-_x0) + _ylim = a*_xlim + b + Lim_points[bound] = _xlim, _ylim + # Plot classifier zones + from matplotlib.patches import Polygon + if type(shots_3) != type(None): + boundaries = [('30', '01'), ('01', '12'), ('12', '23'), ('23', '30')] + colors = ['C0', 'C3', 'C2', 'gold'] + else: + boundaries = [('02', '01'), ('01', '12'), ('12', '02')] + colors = ['C0', 'C3', 'C2'] + for _bds, color in zip(boundaries, colors): + _points = [dec_bounds['mean'], Lim_points[_bds[0]], Lim_points[_bds[1]]] + _patch = Polygon(_points, color=color, alpha=0.2, lw=0) + axs[0].add_patch(_patch) + for bound in _bounds: + _x0, _y0 = 
dec_bounds['mean'] + _x1, _y1 = Lim_points[bound] + axs[0].plot([_x0, _x1], [_y0, _y1], 'k--', lw=1) + axs[0].legend(frameon=False) + axs[0].set_xlabel('Integrated voltage I') + axs[0].set_ylabel('Integrated voltage Q') + axs[0].set_title(f'IQ plot qubit {qubit}') + fig.suptitle(f'{timestamp}\n') + ########################## + # Plot projections + ########################## + # 01 projection + _bin_c = projection_01['bin_centers'] + bin_width = _bin_c[1]-_bin_c[0] + axs[1].bar(_bin_c, projection_01['h0'], bin_width, fc='C0', alpha=0.4) + axs[1].bar(_bin_c, projection_01['h1'], bin_width, fc='C3', alpha=0.4) + axs[1].plot(_bin_c, double_gauss(_bin_c, *projection_01['popt0']), '-C0') + axs[1].plot(_bin_c, double_gauss(_bin_c, *projection_01['popt1']), '-C3') + axs[1].axvline(projection_01['threshold'], ls='--', color='k', lw=1) + text = '\n'.join((f'Fid. : {projection_01["Fid"]*100:.1f}%', + f'SNR : {projection_01["SNR"]:.1f}')) + props = dict(boxstyle='round', facecolor='gray', alpha=0) + axs[1].text(.775, .9, text, transform=axs[1].transAxes, + verticalalignment='top', bbox=props, fontsize=7) + axs[1].text(projection_01['popt0'][0], projection_01['popt0'][4]/2, + r'$|g\rangle$', ha='center', va='center', color='C0') + axs[1].text(projection_01['popt1'][0], projection_01['popt1'][4]/2, + r'$|e\rangle$', ha='center', va='center', color='C3') + axs[1].set_xticklabels([]) + axs[1].set_xlim(_bin_c[0], _bin_c[-1]) + axs[1].set_ylim(bottom=0) + axs[1].set_title('Projection of data') + # 12 projection + _bin_c = projection_12['bin_centers'] + bin_width = _bin_c[1]-_bin_c[0] + axs[2].bar(_bin_c, projection_12['h1'], bin_width, fc='C3', alpha=0.4) + axs[2].bar(_bin_c, projection_12['h2'], bin_width, fc='C2', alpha=0.4) + axs[2].plot(_bin_c, double_gauss(_bin_c, *projection_12['popt1']), '-C3') + axs[2].plot(_bin_c, double_gauss(_bin_c, *projection_12['popt2']), '-C2') + axs[2].axvline(projection_12['threshold'], ls='--', color='k', lw=1) + text = '\n'.join((f'Fid. 
: {projection_12["Fid"]*100:.1f}%', + f'SNR : {projection_12["SNR"]:.1f}')) + props = dict(boxstyle='round', facecolor='gray', alpha=0) + axs[2].text(.775, .9, text, transform=axs[2].transAxes, + verticalalignment='top', bbox=props, fontsize=7) + axs[2].text(projection_12['popt1'][0], projection_12['popt1'][4]/2, + r'$|e\rangle$', ha='center', va='center', color='C3') + axs[2].text(projection_12['popt2'][0], projection_12['popt2'][4]/2, + r'$|f\rangle$', ha='center', va='center', color='C2') + axs[2].set_xticklabels([]) + axs[2].set_xlim(_bin_c[0], _bin_c[-1]) + axs[2].set_ylim(bottom=0) + if projection_02: + # 02 projection + _bin_c = projection_02['bin_centers'] + bin_width = _bin_c[1]-_bin_c[0] + axs[3].bar(_bin_c, projection_02['h0'], bin_width, fc='C0', alpha=0.4) + axs[3].bar(_bin_c, projection_02['h2'], bin_width, fc='C2', alpha=0.4) + axs[3].plot(_bin_c, double_gauss(_bin_c, *projection_02['popt0']), '-C0') + axs[3].plot(_bin_c, double_gauss(_bin_c, *projection_02['popt2']), '-C2') + axs[3].axvline(projection_02['threshold'], ls='--', color='k', lw=1) + text = '\n'.join((f'Fid. 
: {projection_02["Fid"]*100:.1f}%', + f'SNR : {projection_02["SNR"]:.1f}')) + props = dict(boxstyle='round', facecolor='gray', alpha=0) + axs[3].text(.775, .9, text, transform=axs[3].transAxes, + verticalalignment='top', bbox=props, fontsize=7) + axs[3].text(projection_02['popt0'][0], projection_02['popt0'][4]/2, + r'$|g\rangle$', ha='center', va='center', color='C0') + axs[3].text(projection_02['popt2'][0], projection_02['popt2'][4]/2, + r'$|f\rangle$', ha='center', va='center', color='C2') + axs[3].set_xticklabels([]) + axs[3].set_xlim(_bin_c[0], _bin_c[-1]) + axs[3].set_ylim(bottom=0) + axs[3].set_xlabel('Integrated voltage') + if projection_23: + # 23 projection + _bin_c = projection_23['bin_centers'] + bin_width = _bin_c[1]-_bin_c[0] + axs[3].bar(_bin_c, projection_23['h3'], bin_width, fc='gold', alpha=0.4) + axs[3].bar(_bin_c, projection_23['h2'], bin_width, fc='C2', alpha=0.4) + axs[3].plot(_bin_c, double_gauss(_bin_c, *projection_23['popt3']), '-', color='gold') + axs[3].plot(_bin_c, double_gauss(_bin_c, *projection_23['popt2']), '-C2') + axs[3].axvline(projection_23['threshold'], ls='--', color='k', lw=1) + text = '\n'.join((f'Fid. 
: {projection_23["Fid"]*100:.1f}%', + f'SNR : {projection_23["SNR"]:.1f}')) + props = dict(boxstyle='round', facecolor='gray', alpha=0) + axs[3].text(.775, .9, text, transform=axs[3].transAxes, + verticalalignment='top', bbox=props, fontsize=7) + axs[3].text(projection_23['popt3'][0], projection_23['popt3'][4]/2, + r'$|h\rangle$', ha='center', va='center', color='gold') + axs[3].text(projection_23['popt2'][0], projection_23['popt2'][4]/2, + r'$|f\rangle$', ha='center', va='center', color='C2') + axs[3].set_xticklabels([]) + axs[3].set_xlim(_bin_c[0], _bin_c[-1]) + axs[3].set_ylim(bottom=0) + if projection_30: + # 23 projection + _bin_c = projection_30['bin_centers'] + bin_width = _bin_c[1]-_bin_c[0] + axs[4].bar(_bin_c, projection_30['h3'], bin_width, fc='gold', alpha=0.4) + axs[4].bar(_bin_c, projection_30['h0'], bin_width, fc='C0', alpha=0.4) + axs[4].plot(_bin_c, double_gauss(_bin_c, *projection_30['popt3']), '-', color='gold') + axs[4].plot(_bin_c, double_gauss(_bin_c, *projection_30['popt0']), '-C0') + axs[4].axvline(projection_30['threshold'], ls='--', color='k', lw=1) + text = '\n'.join((f'Fid. 
def leakage_hist_plotfn(
    shots_0,
    shots_1,
    shots_2,
    shots_lru,
    classifier,
    dec_bounds,
    pop_vec,
    qoi,
    timestamp,
    qubit,
    ax,
    shots_3=None,
    state='2',
    **kw):
    """
    Plot IQ-plane shots taken after a leakage-removal (LRU) gate.

    Fits a rotated 2D gaussian to each calibration-state cloud (0/1/2 and
    optionally 3), overlays the LRU shots, the classifier decision
    boundaries/zones and the extracted populations and removal fraction.

    Args:
        shots_0, shots_1, shots_2: (n, 2) calibration shots per state.
        shots_lru: (n, 2) shots measured after the LRU gate.
        classifier: trained classifier (unused here; kept so all plot
            functions share the same signature).
        dec_bounds: decision-boundary points, incl. the shared 'mean' point.
        pop_vec: extracted state-population vector (length 3 or 4).
        qoi: quantities of interest; must contain 'removal_fraction'
            (for state '2') or 'removal_fraction_h' (for state '3'), and
            optionally 'removal_fraction_gauss_fit'.
        timestamp, qubit: used for the figure title.
        ax: matplotlib Axes to draw on.
        shots_3: optional (n, 2) shots for the 3rd excited (h) state.
        state: which state's removal fraction is reported ('2' or '3').
    """
    from scipy.optimize import curve_fit

    def twoD_Gaussian(data, amplitude, x0, y0, sigma_x, sigma_y, theta):
        # Rotated 2D gaussian, flattened for curve_fit.
        x, y = data
        x0 = float(x0)
        y0 = float(y0)
        a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2)
        b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
        c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2)
        g = amplitude*np.exp(-(a*((x-x0)**2) + 2*b*(x-x0)*(y-y0)
                               + c*((y-y0)**2)))
        return g.ravel()

    def _fit_2D_gaussian(X, Y):
        # Histogram the shots and fit a single 2D gaussian to the counts.
        counts, _x, _y = np.histogram2d(X, Y, bins=[100, 100], density=True)
        x = (_x[:-1] + _x[1:]) / 2
        y = (_y[:-1] + _y[1:]) / 2
        x, y = np.meshgrid(x, y)
        p0 = [counts.max(), np.mean(X), np.mean(Y), np.std(X), np.std(Y), 0]
        popt, _ = curve_fit(twoD_Gaussian, (x, y), counts.T.ravel(), p0=p0)
        return popt

    popt_0 = _fit_2D_gaussian(shots_0[:, 0], shots_0[:, 1])
    popt_1 = _fit_2D_gaussian(shots_1[:, 0], shots_1[:, 1])
    popt_2 = _fit_2D_gaussian(shots_2[:, 0], shots_2[:, 1])
    if shots_3 is not None:
        # Fit once only (the original fitted the h-state cloud twice).
        popt_3 = _fit_2D_gaussian(shots_3[:, 0], shots_3[:, 1])
    ax.plot(shots_lru[:, 0], shots_lru[:, 1], '.',
            color='C0', alpha=1, markersize=1)
    ax.plot([0, popt_0[1]], [0, popt_0[2]], '--', color='k', lw=.5)
    ax.plot([0, popt_1[1]], [0, popt_1[2]], '--', color='k', lw=.5)
    ax.plot([0, popt_2[1]], [0, popt_2[2]], '--', color='k', lw=.5)
    ax.plot(popt_0[1], popt_0[2], '.', color='C0', label='ground')
    ax.plot(popt_1[1], popt_1[2], '.', color='C3', label='excited')
    ax.plot(popt_2[1], popt_2[2], '.', color='C2', label='$2^\mathrm{nd}$ excited')
    ax.plot(popt_0[1], popt_0[2], 'o', color='w')  # change for ket state
    ax.plot(popt_1[1], popt_1[2], 'o', color='w')  # change for ket state
    ax.plot(popt_2[1], popt_2[2], 'o', color='w')  # change for ket state
    # Draw 4sigma ellipse around each fitted mean
    from matplotlib.patches import Ellipse
    for popt in (popt_0, popt_1, popt_2):
        circle = Ellipse((popt[1], popt[2]),
                         width=4*popt[3], height=4*popt[4],
                         angle=-popt[5]*180/np.pi,
                         ec='white', fc='none', ls='--', lw=1.25, zorder=10)
        ax.add_patch(circle)
    _all_shots = np.concatenate((shots_0, shots_1, shots_2))
    if shots_3 is not None:
        ax.plot([0, popt_3[1]], [0, popt_3[2]], '--', color='k', lw=.5)
        ax.plot(popt_3[1], popt_3[2], '.', color='gold',
                label='$3^\mathrm{rd}$ excited')
        ax.plot(popt_3[1], popt_3[2], 'o', color='w')
        # Draw 4sigma ellipse around mean
        circle_3 = Ellipse((popt_3[1], popt_3[2]),
                           width=4*popt_3[3], height=4*popt_3[4],
                           angle=-popt_3[5]*180/np.pi,
                           ec='white', fc='none', ls='--', lw=1.25, zorder=10)
        ax.add_patch(circle_3)
        _all_shots = np.concatenate((_all_shots, shots_3))
    _lim = np.max([np.max(np.abs(_all_shots[:, 0]))*1.1,
                   np.max(np.abs(_all_shots[:, 1]))*1.1])
    ax.set_xlim(-_lim, _lim)
    ax.set_ylim(-_lim, _lim)
    ax.legend(frameon=False)
    # Plot decision boundary
    _bounds = list(dec_bounds.keys())
    _bounds.remove('mean')
    Lim_points = {}
    for bound in _bounds:
        # Extend each boundary line from the shared mean point out to the
        # plot edge (x = +/-1e2 is far outside any shot).
        _x0, _y0 = dec_bounds['mean']
        _x1, _y1 = dec_bounds[bound]
        a = (_y1-_y0)/(_x1-_x0)
        b = _y0 - a*_x0
        _xlim = 1e2*np.sign(_x1-_x0)
        _ylim = a*_xlim + b
        Lim_points[bound] = _xlim, _ylim
    # Plot classifier zones
    from matplotlib.patches import Polygon
    if shots_3 is not None:
        boundaries = [('30', '01'), ('01', '12'), ('12', '23'), ('23', '30')]
        colors = ['C0', 'C3', 'C2', 'gold']
    else:
        boundaries = [('02', '01'), ('01', '12'), ('12', '02')]
        colors = ['C0', 'C3', 'C2']
    for _bds, color in zip(boundaries, colors):
        _points = [dec_bounds['mean'], Lim_points[_bds[0]], Lim_points[_bds[1]]]
        _patch = Polygon(_points, color=color, alpha=0.2, lw=0)
        ax.add_patch(_patch)
    for bound in _bounds:
        _x0, _y0 = dec_bounds['mean']
        _x1, _y1 = Lim_points[bound]
        ax.plot([_x0, _x1], [_y0, _y1], 'k--', lw=1)

    text = '\n'.join(('State population:',
                      '$P_\\mathrm{|0\\rangle}=$'+f'{pop_vec[0]*100:.2f}%',
                      '$P_\\mathrm{|1\\rangle}=$'+f'{pop_vec[1]*100:.2f}%',
                      '$P_\\mathrm{|2\\rangle}=$'+f'{pop_vec[2]*100:.2f}%',
                      ' ' if len(pop_vec)==3 else \
                      '$P_\\mathrm{|3\\rangle}=$'+f'{pop_vec[3]*100:.2f}%' ,
                      f'$|{state}\\rangle$'+f' removal fraction:\n\n\n'))
    if 'removal_fraction_gauss_fit' in qoi:
        text += '\n'+r'$|2\rangle$'+f' removal fraction\nfrom 3-gauss. fit:\n\n\n'
    props = dict(boxstyle='round', facecolor='white', alpha=1)
    ax.text(1.05, .95, text, transform=ax.transAxes,
            verticalalignment='top', bbox=props, fontsize=9)
    if state == '2':
        removal_fraction = qoi['removal_fraction']
    else:
        removal_fraction = qoi['removal_fraction_h']
    text = f'{removal_fraction*100:.1f}%'
    ax.text(1.05, .48, text, transform=ax.transAxes,
            verticalalignment='top', fontsize=24)
    if 'removal_fraction_gauss_fit' in qoi:
        text = f'{qoi["removal_fraction_gauss_fit"]*100:.1f}%'
        ax.text(1.05, .25, text, transform=ax.transAxes,
                verticalalignment='top', fontsize=24)
    ax.set_xlabel('Integrated voltage I')
    ax.set_ylabel('Integrated voltage Q')
    ax.set_title(f'{timestamp}\n{state}-state removal fraction qubit {qubit}')
def gauss_fit2D_plotfn(
    fit_dict,
    timestamp,
    qubit,
    ax, **kw):
    """
    Plot the result of a three-gaussian-mixture fit to IQ shots.

    Draws the measured 2D histogram, the fitted mixture (as a 2D map and a
    3D surface) and annotates the fitted state populations.

    Args:
        fit_dict: dict holding histogram bin edges ('xbins'/'ybins'),
            measured counts ('n3'), per-gaussian parameters
            ('popt0'..'popt3') and fitted populations under
            fit_dict['qoi'] ('P_0g', 'P_1g', 'P_2g').
        timestamp, qubit: used for the figure suptitle.
        ax: one of the figure's axes; the figure's axes list is reused.
    """
    fig = ax.get_figure()
    axs = fig.get_axes()
    # Reorder so axs = [measured 2D, fitted 2D, 3D surface].
    axs = np.array(axs)[[0, 2, 1]]

    xbins, ybins = fit_dict['xbins'], fit_dict['ybins']
    xbins_c, ybins_c = (xbins[1:]+xbins[:-1])/2, (ybins[1:]+ybins[:-1])/2
    _X, _Y = np.meshgrid(xbins_c, ybins_c)
    # Evaluate the fitted mixture on the bin centers.
    n_3_fit = triple_gauss(_X, _Y, *fit_dict['popt3'])
    # Plot measured histograms
    axs[0].pcolormesh(xbins, ybins, fit_dict['n3'], alpha=1, vmax=2)
    for i in range(3):
        # Plot center of distributions
        axs[0].plot(fit_dict[f'popt{i}'][0], fit_dict[f'popt{i}'][3], 'C3x')
        axs[1].plot(fit_dict[f'popt{i}'][0], fit_dict[f'popt{i}'][3], 'C3x')
        # Draw 4sigma ellipse around mean
        circle = patches.Ellipse(
            (fit_dict[f'popt{i}'][0], fit_dict[f'popt{i}'][3]),
            width=4*fit_dict[f'popt{i}'][6], height=4*fit_dict[f'popt{i}'][9],
            angle=0,  # -popt_0[5]*180/np.pi,
            ec='white', fc='none', ls='--', lw=1.25, zorder=10)
        axs[1].add_patch(circle)
    # Plot fitted histograms
    axs[1].pcolormesh(xbins, ybins, n_3_fit, alpha=1, vmax=1)
    # Re-plot the centers so they sit on top of the pcolormesh.
    axs[1].plot(fit_dict['popt0'][0], fit_dict['popt0'][3], 'C3x')
    axs[1].plot(fit_dict['popt1'][0], fit_dict['popt1'][3], 'C3x')
    axs[1].plot(fit_dict['popt2'][0], fit_dict['popt2'][3], 'C3x')
    axs[0].set_xlabel('Integrated voltage, $I$')
    axs[0].set_ylabel('Integrated voltage, $Q$')
    axs[1].set_xlabel('Integrated voltage, $I$')
    axs[0].set_title('Measured shots')
    axs[1].set_title('Three gaussian mixture fit')
    # 3D plot
    cmap = matplotlib.colors.LinearSegmentedColormap.from_list(
        '', ([(0, '#440154'), (.01, '#3b528b'),
              (.02, '#21918c'), (.3, '#fde725'), (1, 'w')]))
    norm = matplotlib.colors.Normalize(vmin=np.min(n_3_fit),
                                       vmax=np.max(n_3_fit))
    # An Axes' .axes attribute is the axes itself, so the original
    # axs[2].axes.axes chain was redundant.
    axs[2].set_position((.33, .15, .9, .8))
    axs[2].patch.set_visible(False)
    axs[2].plot_surface(_X, _Y, n_3_fit, cmap=cmap, alpha=1,
                        linewidth=0, antialiased=True)
    axs[2].set_xticks([-1, -.5, 0, .5, 1])
    axs[2].set_xticklabels(['-1', '', '0', '', '1'])
    axs[2].set_yticks([-1, -.5, 0, .5, 1])
    axs[2].set_yticklabels(['-1', '', '0', '', '1'])
    axs[2].tick_params(axis='x', which='major', pad=-5)
    axs[2].tick_params(axis='y', which='major', pad=-5)
    axs[2].set_xlim(xbins[0], xbins[-1])
    axs[2].set_ylim(ybins[0], ybins[-1])
    axs[2].set_xlabel('Voltage, $I$', labelpad=-5)
    axs[2].set_ylabel('Voltage, $Q$', labelpad=-5)
    axs[1].text(1.2, 1.025, 'Three gaussian mixture 3D plot',
                transform=axs[1].transAxes, size=12)
    # horizontal colorbar
    cbar_ax = fig.add_axes([.67, .12, .22, .02])
    sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
    fig.colorbar(sm, cax=cbar_ax, orientation='horizontal')
    text = '\n'.join((r'$P_{|0\rangle}$'+f' = {fit_dict["qoi"]["P_0g"]*100:.2f}%',
                      r'$P_{|1\rangle}$'+f' = {fit_dict["qoi"]["P_1g"]*100:.2f}%',
                      r'$P_{|2\rangle}$'+f' = {fit_dict["qoi"]["P_2g"]*100:.2f}%'))
    props = dict(boxstyle='round', facecolor='white', alpha=1)
    axs[1].text(1.05, .95, text, transform=axs[1].transAxes,
                verticalalignment='top', bbox=props, fontsize=11, zorder=100)
    fig.suptitle(f'{timestamp}\n2-state removal fraction qubit {qubit}', y=1.05)
def assignment_matrix_plotfn(
    M,
    qubit,
    timestamp,
    ax, **kw):
    """
    Draw an assignment-probability matrix as an annotated heatmap.

    Rows are prepared states and columns are assigned states; every cell
    is labelled with its probability, using white text on dark cells.

    Args:
        M: square (n x n) assignment-probability matrix.
        qubit: qubit name, used in the title (and as fallback label).
        timestamp: measurement timestamp for the title.
        ax: matplotlib Axes to draw on.
    """
    fig = ax.get_figure()
    im = ax.imshow(M, cmap=plt.cm.Reds, vmin=0, vmax=1)
    n = len(M)
    for col in range(n):
        for row in range(n):
            prob = M[row, col]
            # White annotation where the background is dark.
            extra = {'color': 'white'} if abs(prob) > .5 else {}
            ax.text(col, row, '{:.2f}'.format(prob),
                    va='center', ha='center', **extra)
    ticks = np.arange(n)
    ket_labels = [f'$|{i}\\rangle$' for i in range(n)]
    ax.set_xticks(ticks)
    ax.set_xticklabels(ket_labels)
    ax.set_xlabel('Assigned state')
    ax.set_yticks(ticks)
    ax.set_yticklabels(ket_labels)
    ax.set_ylabel('Prepared state')
    # 3 states -> qutrit, 4 -> ququat; anything else keeps the qubit name.
    name = {3: 'Qutrit', 4: 'Ququat'}.get(n, qubit)
    ax.set_title(f'{timestamp}\n{name} assignment matrix qubit {qubit}')
    cbar_ax = fig.add_axes([.95, .15, .03, .7])
    cb = fig.colorbar(im, cax=cbar_ax)
    cb.set_label('assignment probability')
+ This could at some point move to a higher level class. + """ + self.get_timestamps() + self.timestamp = self.timestamps[0] + data_fp = get_datafilepath_from_timestamp(self.timestamp) + param_spec = {'data': ('Experimental Data/Data', 'dset'), + 'value_names': ('Experimental Data', 'attr:value_names')} + self.raw_data_dict = h5d.extract_pars_from_datafile( + data_fp, param_spec) + # Parts added to be compatible with base analysis data requirements + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] + + def process_data(self): + # Perform measurement post-selection + _cycle = 2*self.rounds+4 + if self.h_state: + _cycle += 1 + if self.heralded_init: + _cycle *= 2 + ############################################ + # Rotate shots in IQ plane + ############################################ + # Sort shots + _raw_shots = self.raw_data_dict['data'][:,1:] + if self.heralded_init: + _shots_0 = _raw_shots[2*self.rounds+1::_cycle] + _shots_1 = _raw_shots[2*self.rounds+3::_cycle] + _shots_2 = _raw_shots[2*self.rounds+5::_cycle] + _shots_lru = _raw_shots[2*self.rounds+7::_cycle] + if self.h_state: + _shots_3 = _raw_shots[2*self.rounds+9::_cycle] + else: + _shots_0 = _raw_shots[2*self.rounds+0::_cycle] + _shots_1 = _raw_shots[2*self.rounds+1::_cycle] + _shots_2 = _raw_shots[2*self.rounds+2::_cycle] + _shots_lru = _raw_shots[2*self.rounds+3::_cycle] + if self.h_state: + _shots_3 = _raw_shots[2*self.rounds+4::_cycle] + # Save raw shots + self.proc_data_dict['shots_0_IQ'] = _shots_0 + self.proc_data_dict['shots_1_IQ'] = _shots_1 + self.proc_data_dict['shots_2_IQ'] = _shots_2 + self.proc_data_dict['shots_lru_IQ'] = _shots_lru + if self.h_state: + self.proc_data_dict['shots_3_IQ'] = _shots_3 + # Rotate data + center_0 = np.array([np.mean(_shots_0[:,0]), np.mean(_shots_0[:,1])]) + center_1 = np.array([np.mean(_shots_1[:,0]), np.mean(_shots_1[:,1])]) + center_2 = np.array([np.mean(_shots_2[:,0]), np.mean(_shots_2[:,1])]) + 
raw_shots = rotate_and_center_data(_raw_shots[:,0], _raw_shots[:,1], center_0, center_1) + ##################################################### + # From this point onward raw shots has shape + # (nr_shots, nr_quadratures). + # Post select based on heralding measurement result. + ##################################################### + if self.heralded_init: + raise NotImplementedError('Not implemented yet.') + # # estimate post-selection threshold + # shots_0 = raw_shots[1::_cycle, 0] + # shots_1 = raw_shots[3::_cycle, 0] + # ps_th = (np.mean(shots_0)+np.mean(shots_1))/2 + # # Sort heralding shots from experiment shots + # ps_shots = raw_shots[0::2,0] # only I quadrature needed for postselection + # exp_shots = raw_shots[1::2] # Here we want to keep both quadratures + # # create post-selection mask + # _mask = [ 1 if s contains post-selected + # shots of state and has shape (nr_ps_shots, nr_quadtrs). + # Next we will analyze shots projected along axis and + # therefore use a single quadrature. shots_ will be used + # to denote that array of shots. + ############################################################## + self.qoi = {} + if self.h_state: + ############################################ + # Use classifier to assign states in the + # IQ plane and calculate qutrit fidelity. 
+ ############################################ + # Parse data for classifier + data = np.concatenate((Shots_0, Shots_1, Shots_2, Shots_3)) + labels = [0 for s in Shots_0]+[1 for s in Shots_1]+\ + [2 for s in Shots_2]+[3 for s in Shots_3] + from sklearn.discriminant_analysis import LinearDiscriminantAnalysis + clf = LinearDiscriminantAnalysis() + clf.fit(data, labels) + dec_bounds = _decision_boundary_points(clf.coef_, clf.intercept_) + Fid_dict = {} + for state, shots in zip([ '0', '1', '2', '3'], + [Shots_0, Shots_1, Shots_2, Shots_3]): + _res = clf.predict(shots) + _fid = np.mean(_res == int(state)) + Fid_dict[state] = _fid + Fid_dict['avg'] = np.mean([f for f in Fid_dict.values()]) + # Get assignment fidelity matrix + M = np.zeros((4,4)) + for i, shots in enumerate([Shots_0, Shots_1, Shots_2, Shots_3]): + for j, state in enumerate(['0', '1', '2', '3']): + _res = clf.predict(shots) + M[i][j] = np.mean(_res == int(state)) + # Get leakage removal fraction + _res = clf.predict(Shots_lru) + _vec = np.array([np.mean(_res == int('0')), + np.mean(_res == int('1')), + np.mean(_res == int('2')), + np.mean(_res == int('3'))]) + M_inv = np.linalg.inv(M) + pop_vec = np.dot(_vec, M_inv) + self.proc_data_dict['classifier'] = clf + self.proc_data_dict['dec_bounds'] = dec_bounds + self.proc_data_dict['Fid_dict'] = Fid_dict + self.qoi['Fid_dict'] = Fid_dict + self.qoi['Assignment_matrix'] = M + self.qoi['pop_vec'] = pop_vec + self.qoi['removal_fraction'] = 1-pop_vec[2] + else: + ############################################ + # Use classifier to assign states in the + # IQ plane and calculate qutrit fidelity. 
+ ############################################ + # Parse data for classifier + data = np.concatenate((Shots_0, Shots_1, Shots_2)) + labels = [0 for s in Shots_0]+[1 for s in Shots_1]+[2 for s in Shots_2] + from sklearn.discriminant_analysis import LinearDiscriminantAnalysis + clf = LinearDiscriminantAnalysis() + clf.fit(data, labels) + dec_bounds = _decision_boundary_points(clf.coef_, clf.intercept_) + Fid_dict = {} + for state, shots in zip([ '0', '1', '2'], + [Shots_0, Shots_1, Shots_2]): + _res = clf.predict(shots) + _fid = np.mean(_res == int(state)) + Fid_dict[state] = _fid + Fid_dict['avg'] = np.mean([f for f in Fid_dict.values()]) + # Get assignment fidelity matrix + M = np.zeros((3,3)) + for i, shots in enumerate([Shots_0, Shots_1, Shots_2]): + for j, state in enumerate(['0', '1', '2']): + _res = clf.predict(shots) + M[i][j] = np.mean(_res == int(state)) + # Get leakage removal fraction + _res = clf.predict(Shots_lru) + _vec = np.array([np.mean(_res == int('0')), + np.mean(_res == int('1')), + np.mean(_res == int('2'))]) + M_inv = np.linalg.inv(M) + pop_vec = np.dot(_vec, M_inv) + # Make it a 4x4 matrix + M = np.append(M, [[0,0,0]], 0) + M = np.append(M, [[0],[0],[0],[1]], 1) + self.proc_data_dict['classifier'] = clf + self.proc_data_dict['dec_bounds'] = dec_bounds + self.proc_data_dict['Fid_dict'] = Fid_dict + self.qoi['Fid_dict'] = Fid_dict + self.qoi['Assignment_matrix'] = M + self.qoi['pop_vec'] = pop_vec + self.qoi['removal_fraction'] = 1-pop_vec[2] + ######################################### + # Project data along axis perpendicular + # to the decision boundaries. + ######################################### + ############################ + # Projection along 10 axis. 
+ ############################ + # Rotate shots over 01 decision boundary axis + shots_0 = rotate_and_center_data(Shots_0[:,0],Shots_0[:,1], dec_bounds['mean'], dec_bounds['01'], phi=np.pi/2) + shots_1 = rotate_and_center_data(Shots_1[:,0],Shots_1[:,1], dec_bounds['mean'], dec_bounds['01'], phi=np.pi/2) + # Take relavant quadrature + shots_0 = shots_0[:,0] + shots_1 = shots_1[:,0] + n_shots_1 = len(shots_1) + # find range + _all_shots = np.concatenate((shots_0, shots_1)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x0, n0 = np.unique(shots_0, return_counts=True) + x1, n1 = np.unique(shots_1, return_counts=True) + Fid_01, threshold_01 = _calculate_fid_and_threshold(x0, n0, x1, n1) + # Histogram of shots for 1 and 2 + h0, bin_edges = np.histogram(shots_0, bins=100, range=_range) + h1, bin_edges = np.histogram(shots_1, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt0, popt1, params_01 = _fit_double_gauss(bin_centers, h0, h1) + # Save processed data + self.proc_data_dict['projection_01'] = {} + self.proc_data_dict['projection_01']['h0'] = h0 + self.proc_data_dict['projection_01']['h1'] = h1 + self.proc_data_dict['projection_01']['bin_centers'] = bin_centers + self.proc_data_dict['projection_01']['popt0'] = popt0 + self.proc_data_dict['projection_01']['popt1'] = popt1 + self.proc_data_dict['projection_01']['SNR'] = params_01['SNR'] + self.proc_data_dict['projection_01']['Fid'] = Fid_01 + self.proc_data_dict['projection_01']['threshold'] = threshold_01 + ############################ + # Projection along 12 axis. 
+ ############################ + # Rotate shots over 12 decision boundary axis + shots_1 = rotate_and_center_data(Shots_1[:,0],Shots_1[:,1],dec_bounds['mean'], dec_bounds['12'], phi=np.pi/2) + shots_2 = rotate_and_center_data(Shots_2[:,0],Shots_2[:,1],dec_bounds['mean'], dec_bounds['12'], phi=np.pi/2) + # Take relavant quadrature + shots_1 = shots_1[:,0] + shots_2 = shots_2[:,0] + n_shots_2 = len(shots_2) + # find range + _all_shots = np.concatenate((shots_1, shots_2)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x1, n1 = np.unique(shots_1, return_counts=True) + x2, n2 = np.unique(shots_2, return_counts=True) + Fid_12, threshold_12 = _calculate_fid_and_threshold(x1, n1, x2, n2) + # Histogram of shots for 1 and 2 + h1, bin_edges = np.histogram(shots_1, bins=100, range=_range) + h2, bin_edges = np.histogram(shots_2, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt1, popt2, params_12 = _fit_double_gauss(bin_centers, h1, h2) + # Save processed data + self.proc_data_dict['projection_12'] = {} + self.proc_data_dict['projection_12']['h1'] = h1 + self.proc_data_dict['projection_12']['h2'] = h2 + self.proc_data_dict['projection_12']['bin_centers'] = bin_centers + self.proc_data_dict['projection_12']['popt1'] = popt1 + self.proc_data_dict['projection_12']['popt2'] = popt2 + self.proc_data_dict['projection_12']['SNR'] = params_12['SNR'] + self.proc_data_dict['projection_12']['Fid'] = Fid_12 + self.proc_data_dict['projection_12']['threshold'] = threshold_12 + if not self.h_state: + ############################ + # Projection along 02 axis. 
+ ############################ + # Rotate shots over 02 decision boundary axis + shots_0 = rotate_and_center_data(Shots_0[:,0],Shots_0[:,1],dec_bounds['mean'],dec_bounds['02'], phi=np.pi/2) + shots_2 = rotate_and_center_data(Shots_2[:,0],Shots_2[:,1],dec_bounds['mean'],dec_bounds['02'], phi=np.pi/2) + # Take relavant quadrature + shots_0 = shots_0[:,0] + shots_2 = shots_2[:,0] + n_shots_2 = len(shots_2) + # find range + _all_shots = np.concatenate((shots_0, shots_2)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x0, n0 = np.unique(shots_0, return_counts=True) + x2, n2 = np.unique(shots_2, return_counts=True) + Fid_02, threshold_02 = _calculate_fid_and_threshold(x0, n0, x2, n2) + # Histogram of shots for 1 and 2 + h0, bin_edges = np.histogram(shots_0, bins=100, range=_range) + h2, bin_edges = np.histogram(shots_2, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt0, popt2, params_02 = _fit_double_gauss(bin_centers, h0, h2) + # Save processed data + self.proc_data_dict['projection_02'] = {} + self.proc_data_dict['projection_02']['h0'] = h0 + self.proc_data_dict['projection_02']['h2'] = h2 + self.proc_data_dict['projection_02']['bin_centers'] = bin_centers + self.proc_data_dict['projection_02']['popt0'] = popt0 + self.proc_data_dict['projection_02']['popt2'] = popt2 + self.proc_data_dict['projection_02']['SNR'] = params_02['SNR'] + self.proc_data_dict['projection_02']['Fid'] = Fid_02 + self.proc_data_dict['projection_02']['threshold'] = threshold_02 + else: + ############################ + # Projection along 23 axis. 
+ ############################ + # Rotate shots over 23 decision boundary axis + shots_2 = rotate_and_center_data(Shots_2[:,0],Shots_2[:,1],dec_bounds['mean'],dec_bounds['23'], phi=np.pi/2) + shots_3 = rotate_and_center_data(Shots_3[:,0],Shots_3[:,1],dec_bounds['mean'],dec_bounds['23'], phi=np.pi/2) + # Take relavant quadrature + shots_3 = shots_3[:,0] + shots_2 = shots_2[:,0] + n_shots_2 = len(shots_2) + # find range + _all_shots = np.concatenate((shots_3, shots_2)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x3, n3 = np.unique(shots_3, return_counts=True) + x2, n2 = np.unique(shots_2, return_counts=True) + Fid_23, threshold_23 = _calculate_fid_and_threshold(x3, n3, x2, n2) + # Histogram of shots for 1 and 2 + h3, bin_edges = np.histogram(shots_3, bins=100, range=_range) + h2, bin_edges = np.histogram(shots_2, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt3, popt2, params_23 = _fit_double_gauss(bin_centers, h3, h2) + # Save processed data + self.proc_data_dict['projection_23'] = {} + self.proc_data_dict['projection_23']['h3'] = h3 + self.proc_data_dict['projection_23']['h2'] = h2 + self.proc_data_dict['projection_23']['bin_centers'] = bin_centers + self.proc_data_dict['projection_23']['popt3'] = popt3 + self.proc_data_dict['projection_23']['popt2'] = popt2 + self.proc_data_dict['projection_23']['SNR'] = params_23['SNR'] + self.proc_data_dict['projection_23']['Fid'] = Fid_23 + self.proc_data_dict['projection_23']['threshold'] = threshold_23 + ############################ + # Projection along 30 axis. 
+ ############################ + # Rotate shots over 30 decision boundary axis + shots_0 = rotate_and_center_data(Shots_0[:,0],Shots_0[:,1],dec_bounds['mean'],dec_bounds['30'], phi=np.pi/2) + shots_3 = rotate_and_center_data(Shots_3[:,0],Shots_3[:,1],dec_bounds['mean'],dec_bounds['30'], phi=np.pi/2) + # Take relavant quadrature + shots_3 = shots_3[:,0] + shots_0 = shots_0[:,0] + n_shots_3 = len(shots_3) + # find range + _all_shots = np.concatenate((shots_3, shots_0)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x3, n3 = np.unique(shots_3, return_counts=True) + x0, n0 = np.unique(shots_0, return_counts=True) + Fid_30, threshold_30 = _calculate_fid_and_threshold(x3, n3, x0, n0) + # Histogram of shots for 1 and 2 + h3, bin_edges = np.histogram(shots_3, bins=100, range=_range) + h0, bin_edges = np.histogram(shots_0, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt3, popt0, params_30 = _fit_double_gauss(bin_centers, h3, h0) + # Save processed data + self.proc_data_dict['projection_30'] = {} + self.proc_data_dict['projection_30']['h3'] = h3 + self.proc_data_dict['projection_30']['h0'] = h0 + self.proc_data_dict['projection_30']['bin_centers'] = bin_centers + self.proc_data_dict['projection_30']['popt3'] = popt3 + self.proc_data_dict['projection_30']['popt0'] = popt0 + self.proc_data_dict['projection_30']['SNR'] = params_30['SNR'] + self.proc_data_dict['projection_30']['Fid'] = Fid_30 + self.proc_data_dict['projection_30']['threshold'] = threshold_30 + + ######################################### + # Analyze repeated LRU experiment shots # + ######################################### + # Assign shots + Shots_qutrit_exp = {} + Shots_qutrit_ref = {} + for r in range(self.rounds): + Shots_qutrit_exp[f'round {r+1}'] = clf.predict(Shots_exp[f'round {r+1}']) + Shots_qutrit_ref[f'round {r+1}'] = clf.predict(Shots_ref[f'round {r+1}']) + # Calculate leakage in ancilla: + Population_exp = {} + Population_ref = {} 
+ def _get_pop_vector(Shots): + p0 = np.mean(Shots==0) + p1 = np.mean(Shots==1) + p2 = np.mean(Shots==2) + p3 = np.mean(Shots==3) + return np.array([p0, p1, p2, p3]) + M_inv = np.linalg.inv(M) + for r in range(self.rounds): + _pop_vec = _get_pop_vector(Shots_qutrit_exp[f'round {r+1}']) + Population_exp[f'round {r+1}'] = np.dot(_pop_vec, M_inv) + _pop_vec = _get_pop_vector(Shots_qutrit_ref[f'round {r+1}']) + Population_ref[f'round {r+1}'] = np.dot(_pop_vec, M_inv) + Population_f_exp = np.array([Population_exp[k][2] for k in Population_exp.keys()]) + Population_f_ref = np.array([Population_ref[k][2] for k in Population_ref.keys()]) + self.proc_data_dict['Population_exp'] = Population_exp + self.proc_data_dict['Population_f_exp'] = Population_f_exp + self.proc_data_dict['Population_ref'] = Population_ref + self.proc_data_dict['Population_f_ref'] = Population_f_ref + if self.h_state: + Population_h_exp = np.array([Population_exp[k][3] for k in Population_exp.keys()]) + Population_h_ref = np.array([Population_ref[k][3] for k in Population_ref.keys()]) + self.proc_data_dict['Population_h_exp'] = Population_h_exp + self.proc_data_dict['Population_h_ref'] = Population_h_ref + # Fit leakage and seepage rates + from scipy.optimize import curve_fit + def _func(n, L, S): + return (1 - np.exp(-n*(S+L)))*L/(S+L) + _x = np.arange(0, self.rounds+1) + _y = [0]+list(Population_f_exp) + p0 = [.02, .5] + popt, pcov = curve_fit(_func, _x, _y, p0=p0, bounds=((0,0), (1,1))) + self.proc_data_dict['fit_res_exp'] = popt, pcov + _y = [0]+list(Population_f_ref) + popt, pcov = curve_fit(_func, _x, _y, p0=p0, bounds=((0,0), (1,1))) + self.proc_data_dict['fit_res_ref'] = popt, pcov + if self.h_state: + _y = [0]+list(Population_h_exp) + popt, pcov = curve_fit(_func, _x, _y, p0=p0, bounds=((0,0), (1,1))) + self.proc_data_dict['fit_res_exp_h'] = popt, pcov + _y = [0]+list(Population_h_ref) + popt, pcov = curve_fit(_func, _x, _y, p0=p0, bounds=((0,0), (1,1))) + self.proc_data_dict['fit_res_ref_h'] = 
    def prepare_plots(self):
        # Build the plot_dicts consumed by BaseDataAnalysis.plot():
        # one IQ/projection overview, the leakage histogram, the
        # assignment matrix and the population-vs-rounds panel.
        self.axs_dict = {}
        fig = plt.figure(figsize=(8,4), dpi=100)
        if self.h_state:
            # With the h (3rd excited) state: IQ panel + 4 projection rows.
            axs = [fig.add_subplot(121),
                   fig.add_subplot(422),
                   fig.add_subplot(424),
                   fig.add_subplot(426),
                   fig.add_subplot(428)]
        else:
            # Qutrit case: IQ panel + 3 projection rows.
            axs = [fig.add_subplot(121),
                   fig.add_subplot(322),
                   fig.add_subplot(324),
                   fig.add_subplot(326)]
        # fig.patch.set_alpha(0)
        self.axs_dict['SSRO_plot'] = axs[0]
        self.figs['SSRO_plot'] = fig
        self.plot_dicts['SSRO_plot'] = {
            'plotfn': ssro_IQ_projection_plotfn,
            'ax_id': 'SSRO_plot',
            'shots_0': self.proc_data_dict['shots_0_IQ'],
            'shots_1': self.proc_data_dict['shots_1_IQ'],
            'shots_2': self.proc_data_dict['shots_2_IQ'],
            # The h-state shots and the 23/30 projections only exist when
            # self.h_state is set; the 02 projection only when it is not.
            'shots_3': self.proc_data_dict['shots_3_IQ'] if self.h_state \
                else None,
            'projection_01': self.proc_data_dict['projection_01'],
            'projection_12': self.proc_data_dict['projection_12'],
            'projection_02': None if self.h_state else\
                self.proc_data_dict['projection_02'],
            'projection_23': None if not self.h_state else\
                self.proc_data_dict['projection_23'],
            'projection_30': None if not self.h_state else\
                self.proc_data_dict['projection_30'],
            'classifier': self.proc_data_dict['classifier'],
            'dec_bounds': self.proc_data_dict['dec_bounds'],
            'Fid_dict': self.proc_data_dict['Fid_dict'],
            'qubit': self.qubit,
            'timestamp': self.timestamp
        }
        fig, ax = plt.subplots(figsize=(3.25,3.25), dpi=100)
        # fig.patch.set_alpha(0)
        self.axs_dict['Leakage_histogram'] = ax
        self.figs['Leakage_histogram'] = fig
        self.plot_dicts['Leakage_histogram'] = {
            'plotfn': leakage_hist_plotfn,
            'ax_id': 'Leakage_histogram',
            'shots_0': self.proc_data_dict['shots_0_IQ'],
            'shots_1': self.proc_data_dict['shots_1_IQ'],
            'shots_2': self.proc_data_dict['shots_2_IQ'],
            'shots_3': self.proc_data_dict['shots_3_IQ'] if self.h_state \
                else None,
            'shots_lru': self.proc_data_dict['shots_lru_IQ'],
            'classifier': self.proc_data_dict['classifier'],
            'dec_bounds': self.proc_data_dict['dec_bounds'],
            'qoi': self.qoi,
            'pop_vec': self.qoi['pop_vec'],
            'qubit': self.qubit,
            'timestamp': self.timestamp
        }
        fig, ax = plt.subplots(figsize=(3,3), dpi=100)
        # fig.patch.set_alpha(0)
        self.axs_dict['Assignment_matrix'] = ax
        self.figs['Assignment_matrix'] = fig
        self.plot_dicts['Assignment_matrix'] = {
            'plotfn': assignment_matrix_plotfn,
            'ax_id': 'Assignment_matrix',
            'M': self.qoi['Assignment_matrix'],
            'qubit': self.qubit,
            'timestamp': self.timestamp
        }
        fig, axs = plt.subplots(figsize=(5,3), nrows=2, sharex=True, dpi=100)
        # fig.patch.set_alpha(0)
        self.axs_dict['Population_vs_rounds'] = axs[0]
        self.figs['Population_vs_rounds'] = fig
        self.plot_dicts['Population_vs_rounds'] = {
            'plotfn': Population_vs_rounds_plotfn,
            'ax_id': 'Population_vs_rounds',
            'rounds': self.rounds,
            'Population_exp': self.proc_data_dict['Population_exp'],
            'fit_res_exp': self.proc_data_dict['fit_res_exp'],
            'Population_ref': self.proc_data_dict['Population_ref'],
            'fit_res_ref': self.proc_data_dict['fit_res_ref'],
            # h-state fit results exist only when self.h_state is set.
            'fit_res_exp_h': self.proc_data_dict['fit_res_exp_h'] if self.h_state \
                else None,
            'fit_res_ref_h': self.proc_data_dict['fit_res_ref_h'] if self.h_state \
                else None,
            'qubit': self.qubit,
            'timestamp': self.timestamp
        }
self.proc_data_dict['dec_bounds'], + 'qoi': self.qoi, + 'pop_vec': self.qoi['pop_vec'], + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + fig, ax = plt.subplots(figsize=(3,3), dpi=100) + # fig.patch.set_alpha(0) + self.axs_dict['Assignment_matrix'] = ax + self.figs['Assignment_matrix'] = fig + self.plot_dicts['Assignment_matrix'] = { + 'plotfn': assignment_matrix_plotfn, + 'ax_id': 'Assignment_matrix', + 'M': self.qoi['Assignment_matrix'], + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + fig, axs = plt.subplots(figsize=(5,3), nrows=2, sharex=True, dpi=100) + # fig.patch.set_alpha(0) + self.axs_dict['Population_vs_rounds'] = axs[0] + self.figs['Population_vs_rounds'] = fig + self.plot_dicts['Population_vs_rounds'] = { + 'plotfn': Population_vs_rounds_plotfn, + 'ax_id': 'Population_vs_rounds', + 'rounds': self.rounds, + 'Population_exp': self.proc_data_dict['Population_exp'], + 'fit_res_exp': self.proc_data_dict['fit_res_exp'], + 'Population_ref': self.proc_data_dict['Population_ref'], + 'fit_res_ref': self.proc_data_dict['fit_res_ref'], + 'fit_res_exp_h': self.proc_data_dict['fit_res_exp_h'] if self.h_state \ + else None, + 'fit_res_ref_h': self.proc_data_dict['fit_res_ref_h'] if self.h_state \ + else None, + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', False): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + tag_tstamp=self.options_dict.get('tag_tstamp', True)) + +def Population_vs_rounds_plotfn( + rounds, + Population_exp, + fit_res_exp, + Population_ref, + fit_res_ref, + timestamp, + qubit, + ax, + fit_res_exp_h=None, + fit_res_ref_h=None, + **kw): + fig = ax.get_figure() + axs = fig.get_axes() + + def _func(n, L, S): + return (1 - np.exp(-n*(S+L)))*L/(S+L) + popt_exp, pcov_exp = fit_res_exp + perr_exp = 
np.sqrt(np.abs(np.diag(pcov_exp))) + popt_ref, pcov_ref = fit_res_ref + perr_ref = np.sqrt(np.abs(np.diag(pcov_ref))) + Population_g_ref = np.array([Population_ref[k][0] for k in Population_ref.keys()]) + Population_e_ref = np.array([Population_ref[k][1] for k in Population_ref.keys()]) + Population_f_ref = np.array([Population_ref[k][2] for k in Population_ref.keys()]) + Population_g_exp = np.array([Population_exp[k][0] for k in Population_exp.keys()]) + Population_e_exp = np.array([Population_exp[k][1] for k in Population_exp.keys()]) + Population_f_exp = np.array([Population_exp[k][2] for k in Population_exp.keys()]) + if not (fit_res_exp_h is None): + Population_h_ref = np.array([Population_ref[k][3] for k in Population_ref.keys()]) + Population_h_exp = np.array([Population_exp[k][3] for k in Population_exp.keys()]) + popt_exp_h, pcov_exp_h = fit_res_exp_h + perr_exp_h = np.sqrt(np.abs(np.diag(pcov_exp_h))) + popt_ref_h, pcov_ref_h = fit_res_ref_h + perr_ref_h = np.sqrt(np.abs(np.diag(pcov_ref_h))) + _rounds_arr = np.arange(rounds)+1 + axs[0].plot(_rounds_arr, Population_g_ref, 'C0-', alpha=.5, label='$|g\\rangle_\\mathrm{{Ref.}}$') + axs[0].plot(_rounds_arr, Population_e_ref, 'C3-', alpha=.5, label='$|e\\rangle_\\mathrm{{Ref.}}$') + axs[0].plot(_rounds_arr, Population_g_exp, 'C0-', label='$|g\\rangle_\\mathrm{{Gate}}$') + axs[0].plot(_rounds_arr, Population_e_exp, 'C3-', label='$|e\\rangle_\\mathrm{{Gate}}$') + axs[1].plot(_rounds_arr, _func(_rounds_arr, *popt_ref), 'k--') + axs[1].plot(_rounds_arr, _func(_rounds_arr, *popt_exp), 'k--') + if not (fit_res_exp_h is None): + axs[1].plot(_rounds_arr, _func(_rounds_arr, *popt_ref_h), '--', color='goldenrod') + axs[1].plot(_rounds_arr, _func(_rounds_arr, *popt_exp_h), '--', color='goldenrod') + axs[1].plot(_rounds_arr, Population_f_ref, 'C2-', alpha=.5, label='$|f\\rangle_\\mathrm{{Ref.}}$') + axs[1].plot(_rounds_arr, Population_f_exp, 'C2-', label='$|f\\rangle_\\mathrm{{Gate}}$') + if not (fit_res_exp_h is None): + 
axs[1].plot(_rounds_arr, Population_h_ref, '-', color='gold', alpha=.5, label='$|h\\rangle_\\mathrm{{Ref.}}$') + axs[1].plot(_rounds_arr, Population_h_exp, '-', color='gold', label='$|h\\rangle_\\mathrm{{Gate}}$') + txtstr = 'Ref.:\n'+\ + f'$L_1={popt_ref[0]*100:.2f} \\pm {perr_ref[0]:.2f}\\%$\n'+\ + f'$L_2={popt_ref[1]*100:.2f} \\pm {perr_ref[1]:.2f}\\%$\n' + if not (fit_res_exp_h is None): + txtstr+= f'$L_1^h={popt_ref_h[0]*100:.2f} \\pm {perr_ref_h[0]:.2f}\\%$\n'+\ + f'$L_2^h={popt_ref_h[1]*100:.2f} \\pm {perr_ref_h[1]:.2f}\\%$\n' + txtstr+= 'Gate:\n'+\ + f'$L_1={popt_exp[0]*100:.2f} \\pm {perr_exp[0]:.2f}\\%$\n'+\ + f'$L_2={popt_exp[1]*100:.2f} \\pm {perr_exp[1]:.2f}\\%$' + if not (fit_res_exp_h is None): + txtstr+= f'\n$L_1^h={popt_exp_h[0]*100:.2f} \\pm {perr_exp_h[0]:.2f}\\%$\n'+\ + f'$L_2^h={popt_exp_h[1]*100:.2f} \\pm {perr_exp_h[1]:.2f}\\%$' + props = dict(boxstyle='round', facecolor='white', alpha=1) + axs[1].text(1.3, 1, txtstr, transform=axs[0].transAxes, + verticalalignment='top', bbox=props) + axs[0].legend(loc=2, frameon=False, bbox_to_anchor=(1.01,1)) + axs[1].legend(loc=2, frameon=False, bbox_to_anchor=(1.01,1)) + axs[0].set_ylabel('Population') + axs[1].set_ylabel('Population') + axs[1].set_xlabel('Rounds') + axs[0].set_title(f'{timestamp}\nRepeated LRU experiment {qubit}') + + +def _get_expected_value(operator, state, n): + m = 1 + for i in range(n): + if operator[i] == 'Z' and state[i] == '1': + m *= -1 + return m + +def _gen_M_matrix(n): + # List of different Operators + ops = ['I','Z'] + Operators = [''.join(op) for op in itertools.product(ops, repeat=n)] + # List of calibration points + states = ['0','1'] + Cal_points = [''.join(s) for s in itertools.product(states, repeat=n)] + # Calculate M matrix + M = np.zeros((2**n, 2**n), dtype=int) + for j, state in enumerate(Cal_points): + Betas = np.ones(len(Operators)) + for i in range(2**n): + Betas[i] = _get_expected_value(Operators[i], state, n) + M[j] = Betas + M = np.linalg.pinv(M) # invert 
matrix + return M + +def _get_Beta_matrix(Cal_shots_dig, n): + ''' + Calculate RO error model (Beta) matrix. + should be a dictionary with the format: + Cal_shots_dig[][] = array of shots with +1 and -1. + ''' + # List of different Operators + ops = ['I','Z'] + Operators = [''.join(op) for op in itertools.product(ops, repeat=n)] + # List of qubits + Qubits = list(Cal_shots_dig.keys()) + # Calculate Beta matrix + H = {} + B = {} + M = _gen_M_matrix(n) + for op in Operators[1:]: + H[op] = np.zeros(2**n) + for i, state in enumerate(Cal_shots_dig[Qubits[0]].keys()): + correlator = 1 + for j, qubit in enumerate(Qubits): + if op[j] == 'Z': + correlator *= np.array(Cal_shots_dig[Qubits[j]][state]) + H[op][i] = np.mean(correlator) + B[op] = np.dot(M, H[op]) + return B + +def _correct_pauli_vector(Beta_matrix, P_vector): + ''' + Applies readout correction from Beta matrix + to a single qubit pauli vector. + ''' + B_matrix = np.array([Beta_matrix[key][1:] for key in Beta_matrix.keys()]) + B_0 = np.array([Beta_matrix[key][0] for key in Beta_matrix.keys()]) + iB_matrix = np.linalg.inv(B_matrix) + # This part is ony valid for single qubit pauli vectors + _p_vec_corrected = (P_vector[1:]-B_0)*iB_matrix + P_corrected = np.concatenate(([1], _p_vec_corrected[0])) + return P_corrected + +def _gen_density_matrix(mx, my, mz): + Pauli_ops = {} + Pauli_ops['I'] = np.array([[ 1, 0], + [ 0, 1]]) + Pauli_ops['Z'] = np.array([[ 1, 0], + [ 0, -1]]) + Pauli_ops['X'] = np.array([[ 0, 1], + [ 1, 0]]) + Pauli_ops['Y'] = np.array([[ 0,-1j], + [ 1j, 0]]) + rho = (Pauli_ops['I'] + mx*Pauli_ops['X'] + my*Pauli_ops['Y'] + mz*Pauli_ops['Z'])/2 + return rho + +def _get_choi_from_PTM(PTM, dim=2): + Pauli_ops = {} + Pauli_ops['I'] = np.array([[ 1, 0], + [ 0, 1]]) + Pauli_ops['Z'] = np.array([[ 1, 0], + [ 0, -1]]) + Pauli_ops['X'] = np.array([[ 0, 1], + [ 1, 0]]) + Pauli_ops['Y'] = np.array([[ 0,-1j], + [ 1j, 0]]) + paulis = [Pauli_ops['I'], + Pauli_ops['X'], + Pauli_ops['Y'], + Pauli_ops['Z']] + 
choi_state = np.zeros([dim**2, dim**2], dtype='complex') + for i in range(dim**2): + for j in range(dim**2): + choi_state += 1/dim**2 * PTM[i,j] * np.kron(paulis[j].transpose(), paulis[i]) + return choi_state + +def _get_pauli_transfer_matrix(Pauli_0, Pauli_1, + Pauli_p, Pauli_m, + Pauli_ip, Pauli_im, + M_in = None): + if type(M_in) == type(None): + M_in = np.array([[1, 0, 0, 1], # 0 + [1, 0, 0,-1], # 1 + [1, 1, 0, 0], # + + [1,-1, 0, 0], # - + [1, 0, 1, 0], # +i + [1, 0,-1, 0]]) # -i + M_in=np.transpose(M_in) + M_out= np.array([Pauli_0, + Pauli_1, + Pauli_p, + Pauli_m, + Pauli_ip, + Pauli_im]) + M_out=np.transpose(M_out) + R = np.matmul(M_out, np.linalg.pinv(M_in)) + # Check for physicality + choi_state = _get_choi_from_PTM(R) + if (np.real(np.linalg.eig(choi_state)[0]) < 0).any(): + print('PTM is unphysical') + else: + print('PTM is physical') + return R + +def _PTM_angle(angle): + angle *= np.pi/180 + R = np.array([[ 1, 0, 0, 0], + [ 0, np.cos(angle),-np.sin(angle), 0], + [ 0, np.sin(angle), np.cos(angle), 0], + [ 0, 0, 0, 1]]) + return R + +def _PTM_fidelity(R, R_id): + return (np.trace(np.matmul(np.transpose(R_id), R))/2+1)/3 + +class LRU_process_tomo_Analysis(ba.BaseDataAnalysis): + """ + Analysis for LRU process tomography experiment. 
+ """ + def __init__(self, + qubit: str, + post_select_2state: bool = False, + fit_3gauss: bool = False, + t_start: str = None, + t_stop: str = None, + label: str = '', + options_dict: dict = None, + extract_only: bool = False, + auto=True + ): + + super().__init__(t_start=t_start, t_stop=t_stop, + label=label, + options_dict=options_dict, + extract_only=extract_only) + + self.qubit = qubit + self.post_select_2state = post_select_2state + self.fit_3gauss = fit_3gauss + if auto: + self.run_analysis() + + def extract_data(self): + self.get_timestamps() + self.timestamp = self.timestamps[0] + data_fp = get_datafilepath_from_timestamp(self.timestamp) + param_spec = {'data': ('Experimental Data/Data', 'dset'), + 'value_names': ('Experimental Data', 'attr:value_names')} + self.raw_data_dict = h5d.extract_pars_from_datafile( + data_fp, param_spec) + # Parts added to be compatible with base analysis data requirements + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] + + def process_data(self): + _cycle = 18+4 + ################################ + # Rotate shots in IQ plane + ################################ + # Sort shots + _raw_shots = self.raw_data_dict['data'][:,1:] + _shots_0 = _raw_shots[18::_cycle] + _shots_1 = _raw_shots[19::_cycle] + _shots_2 = _raw_shots[20::_cycle] + # Rotate data + center_0 = np.array([np.mean(_shots_0[:,0]), np.mean(_shots_0[:,1])]) + center_1 = np.array([np.mean(_shots_1[:,0]), np.mean(_shots_1[:,1])]) + center_2 = np.array([np.mean(_shots_2[:,0]), np.mean(_shots_2[:,1])]) + raw_shots = rotate_and_center_data(_raw_shots[:,0], _raw_shots[:,1], center_0, center_1) + Shots_0 = raw_shots[18::_cycle] + Shots_1 = raw_shots[19::_cycle] + Shots_2 = raw_shots[20::_cycle] + Shots_3 = raw_shots[21::_cycle] + self.proc_data_dict['shots_0_IQ'] = Shots_0 + self.proc_data_dict['shots_1_IQ'] = Shots_1 + self.proc_data_dict['shots_2_IQ'] = Shots_2 + self.proc_data_dict['shots_3_IQ'] = Shots_3 + # Use 
classifier for data + data = np.concatenate((Shots_0, Shots_1, Shots_2)) + labels = [0 for s in Shots_0]+[1 for s in Shots_1]+[2 for s in Shots_2] + from sklearn.discriminant_analysis import LinearDiscriminantAnalysis + clf = LinearDiscriminantAnalysis() + clf.fit(data, labels) + dec_bounds = _decision_boundary_points(clf.coef_, clf.intercept_) + Fid_dict = {} + for state, shots in zip([ '0', '1', '2'], + [Shots_0, Shots_1, Shots_2]): + _res = clf.predict(shots) + _fid = np.mean(_res == int(state)) + Fid_dict[state] = _fid + Fid_dict['avg'] = np.mean([f for f in Fid_dict.values()]) + # Get assignment fidelity matrix + M = np.zeros((3,3)) + for i, shots in enumerate([Shots_0, Shots_1, Shots_2]): + for j, state in enumerate(['0', '1', '2']): + _res = clf.predict(shots) + M[i][j] = np.mean(_res == int(state)) + # Get leakage removal fraction + _res = clf.predict(Shots_3) + _vec = np.array([np.mean(_res == int('0')), + np.mean(_res == int('1')), + np.mean(_res == int('2'))]) + M_inv = np.linalg.inv(M) + pop_vec = np.dot(_vec, M_inv) + self.proc_data_dict['classifier'] = clf + self.proc_data_dict['dec_bounds'] = dec_bounds + self.proc_data_dict['Fid_dict'] = Fid_dict + self.qoi = {} + self.qoi['Fid_dict'] = Fid_dict + self.qoi['Assignment_matrix'] = M + self.qoi['pop_vec'] = pop_vec + self.qoi['removal_fraction'] = 1-pop_vec[2] + ######################################### + # Project data along axis perpendicular + # to the decision boundaries. + ######################################### + ############################ + # Projection along 01 axis. 
+ ############################ + # Rotate shots over 01 axis + shots_0 = rotate_and_center_data(Shots_0[:,0],Shots_0[:,1],dec_bounds['mean'], dec_bounds['01'], phi=np.pi/2) + shots_1 = rotate_and_center_data(Shots_1[:,0],Shots_1[:,1],dec_bounds['mean'], dec_bounds['01'], phi=np.pi/2) + # Take relavant quadrature + shots_0 = shots_0[:,0] + shots_1 = shots_1[:,0] + n_shots_1 = len(shots_1) + # find range + _all_shots = np.concatenate((shots_0, shots_1)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x0, n0 = np.unique(shots_0, return_counts=True) + x1, n1 = np.unique(shots_1, return_counts=True) + Fid_01, threshold_01 = _calculate_fid_and_threshold(x0, n0, x1, n1) + # Histogram of shots for 1 and 2 + h0, bin_edges = np.histogram(shots_0, bins=100, range=_range) + h1, bin_edges = np.histogram(shots_1, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt0, popt1, params_01 = _fit_double_gauss(bin_centers, h0, h1) + # Save processed data + self.proc_data_dict['projection_01'] = {} + self.proc_data_dict['projection_01']['h0'] = h0 + self.proc_data_dict['projection_01']['h1'] = h1 + self.proc_data_dict['projection_01']['bin_centers'] = bin_centers + self.proc_data_dict['projection_01']['popt0'] = popt0 + self.proc_data_dict['projection_01']['popt1'] = popt1 + self.proc_data_dict['projection_01']['SNR'] = params_01['SNR'] + self.proc_data_dict['projection_01']['Fid'] = Fid_01 + self.proc_data_dict['projection_01']['threshold'] = threshold_01 + ############################ + # Projection along 12 axis. 
+ ############################ + # Rotate shots over 12 axis + shots_1 = rotate_and_center_data(Shots_1[:,0],Shots_1[:,1],dec_bounds['mean'], dec_bounds['12'], phi=np.pi/2) + shots_2 = rotate_and_center_data(Shots_2[:,0],Shots_2[:,1],dec_bounds['mean'], dec_bounds['12'], phi=np.pi/2) + # Take relavant quadrature + shots_1 = shots_1[:,0] + shots_2 = shots_2[:,0] + n_shots_2 = len(shots_2) + # find range + _all_shots = np.concatenate((shots_1, shots_2)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x1, n1 = np.unique(shots_1, return_counts=True) + x2, n2 = np.unique(shots_2, return_counts=True) + Fid_12, threshold_12 = _calculate_fid_and_threshold(x1, n1, x2, n2) + # Histogram of shots for 1 and 2 + h1, bin_edges = np.histogram(shots_1, bins=100, range=_range) + h2, bin_edges = np.histogram(shots_2, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt1, popt2, params_12 = _fit_double_gauss(bin_centers, h1, h2) + # Save processed data + self.proc_data_dict['projection_12'] = {} + self.proc_data_dict['projection_12']['h1'] = h1 + self.proc_data_dict['projection_12']['h2'] = h2 + self.proc_data_dict['projection_12']['bin_centers'] = bin_centers + self.proc_data_dict['projection_12']['popt1'] = popt1 + self.proc_data_dict['projection_12']['popt2'] = popt2 + self.proc_data_dict['projection_12']['SNR'] = params_12['SNR'] + self.proc_data_dict['projection_12']['Fid'] = Fid_12 + self.proc_data_dict['projection_12']['threshold'] = threshold_12 + ############################ + # Projection along 02 axis. 
+ ############################ + # Rotate shots over 02 axis + shots_0 = rotate_and_center_data(Shots_0[:,0],Shots_0[:,1],dec_bounds['mean'],dec_bounds['02'], phi=np.pi/2) + shots_2 = rotate_and_center_data(Shots_2[:,0],Shots_2[:,1],dec_bounds['mean'],dec_bounds['02'], phi=np.pi/2) + # Take relavant quadrature + shots_0 = shots_0[:,0] + shots_2 = shots_2[:,0] + n_shots_2 = len(shots_2) + # find range + _all_shots = np.concatenate((shots_0, shots_2)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x0, n0 = np.unique(shots_0, return_counts=True) + x2, n2 = np.unique(shots_2, return_counts=True) + Fid_02, threshold_02 = _calculate_fid_and_threshold(x0, n0, x2, n2) + # Histogram of shots for 1 and 2 + h0, bin_edges = np.histogram(shots_0, bins=100, range=_range) + h2, bin_edges = np.histogram(shots_2, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt0, popt2, params_02 = _fit_double_gauss(bin_centers, h0, h2) + # Save processed data + self.proc_data_dict['projection_02'] = {} + self.proc_data_dict['projection_02']['h0'] = h0 + self.proc_data_dict['projection_02']['h2'] = h2 + self.proc_data_dict['projection_02']['bin_centers'] = bin_centers + self.proc_data_dict['projection_02']['popt0'] = popt0 + self.proc_data_dict['projection_02']['popt2'] = popt2 + self.proc_data_dict['projection_02']['SNR'] = params_02['SNR'] + self.proc_data_dict['projection_02']['Fid'] = Fid_02 + self.proc_data_dict['projection_02']['threshold'] = threshold_02 + ############################### + # Fit 3-gaussian mixture + ############################### + if self.fit_3gauss: + _all_shots = np.concatenate((self.proc_data_dict['shots_0_IQ'], + self.proc_data_dict['shots_1_IQ'], + self.proc_data_dict['shots_2_IQ'])) + _lim = np.max([ np.max(np.abs(_all_shots[:,0]))*1.1, + np.max(np.abs(_all_shots[:,1]))*1.1 ]) + def _histogram_2d(x_data, y_data, lim): + _bins = (np. 
linspace(-lim, lim, 201), np.linspace(-lim, lim, 201)) + n, _xbins, _ybins = np.histogram2d(x_data, y_data, bins=_bins) + return n.T, _xbins, _ybins + # 2D histograms + n_0, xbins, ybins = _histogram_2d(*self.proc_data_dict['shots_0_IQ'].T, lim=_lim) + n_1, xbins, ybins = _histogram_2d(*self.proc_data_dict['shots_1_IQ'].T, lim=_lim) + n_2, xbins, ybins = _histogram_2d(*self.proc_data_dict['shots_2_IQ'].T, lim=_lim) + n_3, xbins, ybins = _histogram_2d(*self.proc_data_dict['shots_3_IQ'].T, lim=_lim) + # bin centers + xbins_c, ybins_c = (xbins[1:]+xbins[:-1])/2, (ybins[1:]+ybins[:-1])/2 + popt0, popt1, popt2, qoi = _fit_triple_gauss(xbins_c, ybins_c, n_0, n_1, n_2) + popt3, _popt1, _popt2, _qoi = _fit_triple_gauss(xbins_c, ybins_c, n_3, n_1, n_2) + self.proc_data_dict['3gauss_fit'] = {} + self.proc_data_dict['3gauss_fit']['xbins'] = xbins + self.proc_data_dict['3gauss_fit']['ybins'] = ybins + self.proc_data_dict['3gauss_fit']['n0'] = n_0 + self.proc_data_dict['3gauss_fit']['n1'] = n_1 + self.proc_data_dict['3gauss_fit']['n2'] = n_2 + self.proc_data_dict['3gauss_fit']['n3'] = n_3 + self.proc_data_dict['3gauss_fit']['popt0'] = popt0 + self.proc_data_dict['3gauss_fit']['popt1'] = popt1 + self.proc_data_dict['3gauss_fit']['popt2'] = popt2 + self.proc_data_dict['3gauss_fit']['popt3'] = popt3 + self.proc_data_dict['3gauss_fit']['qoi'] = _qoi + self.proc_data_dict['3gauss_fit']['removal_fraction'] = 1-_qoi['P_2g'] + self.qoi['removal_fraction_gauss_fit'] = 1-_qoi['P_2g'] + + ################################ + # Process tomo analysis + ################################ + _thresh = self.proc_data_dict['projection_01']['threshold'] + if self.post_select_2state: + _dig_shots = clf.predict(raw_shots) + else: + _dig_shots = [0 if s < _thresh else 1 for s in raw_shots[:,0]] + # Beta matrix for readout corrections + cal_shots_dig = {self.qubit:{}} + cal_shots_dig[self.qubit]['0'] = [+1 if s < _thresh else -1 for s in Shots_0[:,0]] + cal_shots_dig[self.qubit]['1'] = [+1 if s < _thresh 
else -1 for s in Shots_1[:,0]] + Beta_matrix = _get_Beta_matrix(cal_shots_dig, n=1) + # Calculate expectation values for each state + States = ['0', '1', '+', '-', '+i', '-i'] + Operators = ['Z', 'X', 'Y'] + # Parse shots and post-select leakage + dig_shots = {} + leak_frac = 0 + for i, state in enumerate(States): + dig_shots[state] = {} + for j, op in enumerate(Operators): + _shot_list = _dig_shots[i*3+j::_cycle] + # Post-select on leakage + _shot_list = [ s for s in _shot_list if s!=2 ] + _leak_frac = 1-len(_shot_list)/len(_dig_shots[i*3+j::_cycle]) + leak_frac += _leak_frac/18 + # Turn in meas outcomes (+1/-1) + dig_shots[state][op] = 1 - 2*np.array(_shot_list) + # Build density matrices and pauli vector for each input state + # Ideal states + Density_matrices_ideal = {} + Density_matrices_ideal['0'] = _gen_density_matrix(mx=0, my=0, mz=+1) + Density_matrices_ideal['1'] = _gen_density_matrix(mx=0, my=0, mz=-1) + Density_matrices_ideal['+'] = _gen_density_matrix(mx=+1, my=0, mz=0) + Density_matrices_ideal['-'] = _gen_density_matrix(mx=-1, my=0, mz=0) + Density_matrices_ideal['+i'] = _gen_density_matrix(mx=0, my=+1, mz=0) + Density_matrices_ideal['-i'] = _gen_density_matrix(mx=0, my=-1, mz=0) + Density_matrices = {} + Pauli_vectors = {} + for state in States: + mz = np.mean(dig_shots[state]['Z']) + mx = np.mean(dig_shots[state]['X']) + my = np.mean(dig_shots[state]['Y']) + p_vector = np.array([1, mx, my, mz]) + Pauli_vectors[state] = _correct_pauli_vector(Beta_matrix, p_vector) + rho = _gen_density_matrix(*Pauli_vectors[state][1:]) + Density_matrices[state] = rho + # Get PTM + PTM = _get_pauli_transfer_matrix(Pauli_0 = Pauli_vectors['0'], + Pauli_1 = Pauli_vectors['1'], + Pauli_p = Pauli_vectors['+'], + Pauli_m = Pauli_vectors['-'], + Pauli_ip = Pauli_vectors['+i'], + Pauli_im = Pauli_vectors['-i']) + # Calculate angle of plus state + angle_p = np.arctan2(Pauli_vectors['+'][2],Pauli_vectors['+'][1])*180/np.pi + # get ideal PTM with same angle and + # calculate 
fidelity of extracted PTM. + PTM_id = np.eye(4) + F_PTM = _PTM_fidelity(PTM, PTM_id) + PTM_id = _PTM_angle(angle_p) + F_PTM_rotated = _PTM_fidelity(PTM, PTM_id) + self.proc_data_dict['PS_frac'] = leak_frac + self.proc_data_dict['Density_matrices'] = Density_matrices + self.proc_data_dict['Density_matrices_ideal'] = Density_matrices_ideal + self.proc_data_dict['PTM'] = PTM + self.proc_data_dict['angle_p'] = angle_p + self.proc_data_dict['F_PTM'] = F_PTM + self.proc_data_dict['F_PTM_r'] = F_PTM_rotated + + def prepare_plots(self): + self.axs_dict = {} + fig = plt.figure(figsize=(8,4), dpi=100) + axs = [fig.add_subplot(121), + fig.add_subplot(322), + fig.add_subplot(324), + fig.add_subplot(326)] + # fig.patch.set_alpha(0) + self.axs_dict['SSRO_plot'] = axs[0] + self.figs['SSRO_plot'] = fig + self.plot_dicts['SSRO_plot'] = { + 'plotfn': ssro_IQ_projection_plotfn, + 'ax_id': 'SSRO_plot', + 'shots_0': self.proc_data_dict['shots_0_IQ'], + 'shots_1': self.proc_data_dict['shots_1_IQ'], + 'shots_2': self.proc_data_dict['shots_2_IQ'], + 'projection_01': self.proc_data_dict['projection_01'], + 'projection_12': self.proc_data_dict['projection_12'], + 'projection_02': self.proc_data_dict['projection_02'], + 'classifier': self.proc_data_dict['classifier'], + 'dec_bounds': self.proc_data_dict['dec_bounds'], + 'Fid_dict': self.proc_data_dict['Fid_dict'], + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + fig, ax = plt.subplots(figsize=(3,3), dpi=100) + # fig.patch.set_alpha(0) + self.axs_dict['Assignment_matrix'] = ax + self.figs['Assignment_matrix'] = fig + self.plot_dicts['Assignment_matrix'] = { + 'plotfn': assignment_matrix_plotfn, + 'ax_id': 'Assignment_matrix', + 'M': self.qoi['Assignment_matrix'], + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + fig, ax = plt.subplots(figsize=(3.25,3.25), dpi=100) + # fig.patch.set_alpha(0) + self.axs_dict['Leakage_histogram'] = ax + self.figs['Leakage_histogram'] = fig + self.plot_dicts['Leakage_histogram'] = { + 'plotfn': 
leakage_hist_plotfn, + 'ax_id': 'Leakage_histogram', + 'shots_0': self.proc_data_dict['shots_0_IQ'], + 'shots_1': self.proc_data_dict['shots_1_IQ'], + 'shots_2': self.proc_data_dict['shots_2_IQ'], + 'shots_lru': self.proc_data_dict['shots_3_IQ'], + 'classifier': self.proc_data_dict['classifier'], + 'dec_bounds': self.proc_data_dict['dec_bounds'], + 'pop_vec': self.qoi['pop_vec'], + 'qoi': self.qoi, + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + fig = plt.figure(figsize=(3*1.5, 6*1.5), dpi=200) + axs =[] + for i in range(6): + axs.append(fig.add_subplot(3, 2, i+1 , projection='3d', azim=-35, elev=35)) + # fig.patch.set_alpha(0) + self.axs_dict['Density_matrices'] = axs[0] + self.figs['Density_matrices'] = fig + self.plot_dicts['Density_matrices'] = { + 'plotfn': density_matrices_plotfn, + 'ax_id': 'Density_matrices', + 'Density_matrices': self.proc_data_dict['Density_matrices'], + 'Density_matrices_ideal': self.proc_data_dict['Density_matrices_ideal'], + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + fig, ax = plt.subplots(figsize=(3.5,3.5), dpi=100) + # fig.patch.set_alpha(0) + self.axs_dict['Pauli_transfer_matrix'] = ax + self.figs['Pauli_transfer_matrix'] = fig + self.plot_dicts['Pauli_transfer_matrix'] = { + 'plotfn': PTM_plotfn, + 'ax_id': 'Pauli_transfer_matrix', + 'R': self.proc_data_dict['PTM'], + 'F_PTM': self.proc_data_dict['F_PTM'], + 'angle': self.proc_data_dict['angle_p'], + 'F_PTM_r': self.proc_data_dict['F_PTM_r'], + 'title': r'PTM, $\mathcal{R}_{\mathrm{LRU}}$'+f' {self.qubit}', + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + if self.fit_3gauss: + fig = plt.figure(figsize=(12,4), dpi=100) + axs = [None,None,None] + axs[0] = fig.add_subplot(1, 3, 1) + axs[2] = fig.add_subplot(1, 3, 3 , projection='3d', elev=20) + axs[1] = fig.add_subplot(1, 3, 2) + # fig.patch.set_alpha(0) + self.axs_dict['Three_gauss_fit'] = axs[0] + self.figs['Three_gauss_fit'] = fig + self.plot_dicts['Three_gauss_fit'] = { + 'plotfn': 
gauss_fit2D_plotfn, + 'ax_id': 'Three_gauss_fit', + 'fit_dict': self.proc_data_dict['3gauss_fit'], + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', False): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + tag_tstamp=self.options_dict.get('tag_tstamp', True)) + +def _plot_density_matrix(rho, f, ax, rho_ideal=None, state=None, cbar=True): + if state is None: + pass + else: + ax.set_title('Logical state '+state, pad=10, fontsize=5) + ax.set_xticks([-.1, .9]) + ax.set_yticks([-.1, .9]) + ax.set_xticklabels(['$0$', '$1$'], rotation=0, fontsize=4.5) + ax.set_yticklabels(['$0$', '$1$'], rotation=0, fontsize=4.5) + ax.tick_params(axis='x', which='major', pad=-6) + ax.tick_params(axis='y', which='major', pad=-7) + ax.tick_params(axis='z', which='major', pad=-4) + for tick in ax.yaxis.get_majorticklabels(): + tick.set_horizontalalignment("left") + ax.set_zticks(np.linspace(0, 1, 3)) + ax.set_zticklabels(['0', '0.5', '1'], fontsize=4) + ax.set_zlim(0, 1) + + xedges = np.arange(-.75, 2, 1) + yedges = np.arange(-.75, 2, 1) + xpos, ypos = np.meshgrid(xedges[:-1] + 0.25, yedges[:-1] + 0.25, indexing="ij") + xpos = xpos.ravel() + ypos = ypos.ravel() + zpos = 0 + dx = dy = .8 + dz = np.abs(rho).ravel() + + cmap = matplotlib.colors.LinearSegmentedColormap.from_list("", ["C3",'darkseagreen',"C0", + 'antiquewhite',"C3"]) + norm = matplotlib.colors.Normalize(vmin=-np.pi, vmax=np.pi) + color=cmap(norm([np.angle(e) for e in rho.ravel()])) + ax.bar3d(xpos, ypos, zpos, dx, dy, dz, zsort='max', + color=color, alpha=1 , edgecolor='black', linewidth=.1) + if rho_ideal is not None: + dz1 = np.abs(rho_ideal).ravel() + color1=cmap(norm([np.angle(e) for e in rho_ideal.ravel()])) + # selector + s = [k for k in range(len(dz1)) if dz1[k] > .15] + ax.bar3d(xpos[s], ypos[s], 
dz[s], dx, dy, dz=dz1[s]-dz[s], zsort='min', + color=color1[s], alpha=.25, edgecolor='black', linewidth=.4) + sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm) + if cbar: + cb = f.colorbar(sm) + cb.set_ticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi]) + cb.set_ticklabels(['$-\pi$', '$-\pi/2$', '0', '$\pi/2$', '$\pi$']) + cb.set_label('arg', fontsize=3) + cb.ax.tick_params(labelsize=3) + cb.outline.set_linewidth(.5) + cb.ax.tick_params(labelsize=3, width=.5, length=1, pad=1) + f.axes[-1].set_position([.85, .2, .05, .6]) + f.axes[-1].get_yaxis().labelpad=-4 + ax.set_zlabel(r'$|\rho|$', fontsize=5, labelpad=-51) + +def density_matrices_plotfn( + Density_matrices, + Density_matrices_ideal, + timestamp, + qubit, + ax, + title=None, + **kw): + fig = ax.get_figure() + axs = fig.get_axes() + + States = [s for s in Density_matrices.keys()] + for i, state in enumerate(States): + axs[i].axes.axes.set_position((.2+(i//2)*.225, .43-(i%2)*.1, .15*1.5, .075*1.1)) + _plot_density_matrix(Density_matrices[state], fig, axs[i], state=None, cbar=False, + rho_ideal=Density_matrices_ideal[state]) + axs[i].set_title(r'Input state $|{}\rangle$'.format(state), fontsize=5, pad=-5) + axs[i].patch.set_visible(False) + # vertical colorbar + from mpl_toolkits.axes_grid1.inset_locator import inset_axes + cbar_ax = fig.add_axes([.9, .341, .01, .15]) + cmap = matplotlib.colors.LinearSegmentedColormap.from_list('', ['C3','darkseagreen','C0','antiquewhite','C3']) + norm = matplotlib.colors.Normalize(vmin=-np.pi, vmax=np.pi) + sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm) + cb = fig.colorbar(sm, cax=cbar_ax, orientation='vertical') + cb.set_ticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi]) + cb.set_ticklabels(['$-\pi$', '$-\pi/2$', '0', '$\pi/2$', '$\pi$']) + cb.set_label(r'arg($\rho$)', fontsize=5, labelpad=-8) + cb.ax.tick_params(labelsize=6) + cb.outline.set_linewidth(.5) + cb.ax.tick_params(labelsize=5, width=.5, length=2, pad=3) + _title = timestamp + if title: + _title += '\n'+title + 
def PTM_plotfn(
        R,
        F_PTM,
        angle,
        F_PTM_r,
        timestamp,
        qubit,
        ax,
        title=None,
        **kw):
    """
    Plot a single-qubit Pauli transfer matrix.

    Args:
        R: 4x4 real PTM in the (I, X, Y, Z) basis; entries are shown as a
            heatmap in [-1, 1] with their numeric values overlaid.
        F_PTM: average gate fidelity; if falsy, the fidelity text box is omitted.
        angle: rotation angle (degrees) used for the rotated-fidelity figure.
        F_PTM_r: average gate fidelity after rotation by `angle`.
        timestamp: measurement timestamp, used in the axes title.
        qubit: qubit name (accepted for interface compatibility; not used here).
        ax: matplotlib axes to draw on.
        title: optional extra title line appended below the timestamp.
    """
    im = ax.imshow(R, cmap=plt.cm.PiYG, vmin=-1, vmax=1)
    ax.set_xticks(np.arange(4))
    ax.set_yticks(np.arange(4))
    ax.set_xticklabels(['$I$', '$X$', '$Y$', '$Z$'])
    ax.set_yticklabels(['$I$', '$X$', '$Y$', '$Z$'])
    # Overlay each matrix element; use white text on strongly colored cells.
    for i in range(4):
        for j in range(4):
            c = R[j, i]
            if abs(c) > .5:
                ax.text(i, j, '{:.2f}'.format(c), va='center', ha='center',
                        color='white')
            else:
                ax.text(i, j, '{:.2f}'.format(c), va='center', ha='center')
    _title = f'{timestamp}'
    if title:
        _title += '\n' + title
    ax.set_title(_title, pad=4)
    ax.set_xlabel('input Pauli operator')
    ax.set_ylabel('Output Pauli operator')
    ax.axes.tick_params(width=.5, length=2)
    ax.spines['left'].set_linewidth(.5)
    ax.spines['right'].set_linewidth(.5)
    ax.spines['top'].set_linewidth(.5)
    ax.spines['bottom'].set_linewidth(.5)
    if F_PTM:
        # Raw f-strings with doubled braces render the same LaTeX as the
        # previous f'...{"{avg}"}...' hack, without invalid escape sequences.
        text = '\n'.join(('Average gate fidelity:',
                          rf'$F_\mathrm{{avg}}=${F_PTM*100:.1f}%',
                          'Average gate fidelity\nafter rotation:',
                          rf'$\phi=${angle:.1f}$^\mathrm{{o}}$',
                          rf'$F_\mathrm{{avg}}^\phi=${F_PTM_r*100:.1f}%'))
        props = dict(boxstyle='round', facecolor='white', alpha=1)
        ax.text(1.05, 1., text, transform=ax.transAxes,
                verticalalignment='top', bbox=props)


class LRU_frequency_sweep_Analysis(ba.BaseDataAnalysis):
    """
    Analysis for LRU frequency sweep experiment.

    Extracts |0>, |1>, |2> populations versus LRU pulse frequency and
    reports the frequency minimizing a weighted leakage cost function.
    """
    def __init__(self,
                 t_start: str = None,
                 t_stop: str = None,
                 label: str = '',
                 options_dict: dict = None,
                 extract_only: bool = False,
                 auto=True
                 ):
        super().__init__(t_start=t_start, t_stop=t_stop,
                         label=label,
                         options_dict=options_dict,
                         extract_only=extract_only)
        if auto:
            self.run_analysis()

    def extract_data(self):
        """Load the measurement dataset for the first matching timestamp."""
        self.get_timestamps()
        self.timestamp = self.timestamps[0]
        data_fp = get_datafilepath_from_timestamp(self.timestamp)
        # Qubit name is encoded at the end of the file name stem.
        self.qubit = (data_fp.split('.')[0]).split('_')[-1]
        param_spec = {'data': ('Experimental Data/Data', 'dset'),
                      'value_names': ('Experimental Data', 'attr:value_names')}
        self.raw_data_dict = h5d.extract_pars_from_datafile(
            data_fp, param_spec)
        # Parts added to be compatible with base analysis data requirements
        self.raw_data_dict['timestamps'] = self.timestamps
        self.raw_data_dict['folder'] = os.path.split(data_fp)[0]

    def process_data(self):
        # Sort raw data: columns are frequency and the three-state populations.
        Frequency = self.raw_data_dict['data'][:, 0]
        P_0 = self.raw_data_dict['data'][:, 1]
        P_1 = self.raw_data_dict['data'][:, 2]
        P_2 = self.raw_data_dict['data'][:, 3]
        # Mark unphysical results (populations outside [-0.07, 1.05]) with NaN
        # so they are ignored by nanargmin below.
        _P0 = np.array([p if (p > -0.07 and p < 1.05) else np.nan for p in P_0])
        _P1 = np.array([p if (p > -0.07 and p < 1.05) else np.nan for p in P_1])
        _P2 = np.array([p if (p > -0.07 and p < 1.05) else np.nan for p in P_2])
        # Cost function: reward high P0, strongly penalize P1, penalize P2.
        CF = (1-_P0)**2 + 10*(_P1)**2 + (_P2)**2
        # Frequency that minimizes the cost function (not P_2 alone).
        f_opt = Frequency[np.nanargmin(CF)]
        # Store parameters
        self.proc_data_dict['Frequency'] = Frequency
        self.proc_data_dict['Population_0'] = P_0
        self.proc_data_dict['Population_1'] = P_1
        self.proc_data_dict['Population_2'] = P_2
        self.qoi = {'f_optimal': f_opt}

    def prepare_plots(self):
        self.axs_dict = {}
        fig, axs = plt.subplots(figsize=(8, 6), nrows=3, sharex=True, dpi=100)
        self.axs_dict['LRU_frequency_sweep'] = axs[0]
        self.figs['LRU_frequency_sweep'] = fig
        self.plot_dicts['LRU_frequency_sweep'] = {
            'plotfn': LRU_frequency_sweep_plotfn,
            'ax_id': 'LRU_frequency_sweep',
            'Frequency': self.proc_data_dict['Frequency'],
            'Population_0': self.proc_data_dict['Population_0'],
            'Population_1': self.proc_data_dict['Population_1'],
            'Population_2': self.proc_data_dict['Population_2'],
            'f_optimal': self.qoi['f_optimal'],
            'qubit': self.qubit,
            'timestamp': self.timestamp
        }

    def run_post_extract(self):
        self.prepare_plots()  # specify default plots
        self.plot(key_list='auto', axs_dict=self.axs_dict)  # make the plots
        if self.options_dict.get('save_figs', False):
            self.save_figures(
                close_figs=self.options_dict.get('close_figs', True),
                tag_tstamp=self.options_dict.get('tag_tstamp', True))


def LRU_frequency_sweep_plotfn(
        Frequency,
        Population_0,
        Population_1,
        Population_2,
        f_optimal,
        timestamp,
        qubit,
        ax,
        **kw):
    """
    Plot the three state populations versus LRU pulse frequency on the three
    stacked axes of the figure owning `ax`, marking the optimal frequency.
    """
    fig = ax.get_figure()
    axs = fig.get_axes()
    # Plot quantities (frequency in GHz, populations in percent)
    axs[0].plot(Frequency*1e-9, Population_0*100, 'C0-')
    axs[1].plot(Frequency*1e-9, Population_1*100, 'C3-')
    axs[2].plot(Frequency*1e-9, Population_2*100, 'C2-')
    axs[0].axvline(f_optimal*1e-9, color='k', ls='--', lw=1)
    axs[1].axvline(f_optimal*1e-9, color='k', ls='--', lw=1)
    axs[2].axvline(f_optimal*1e-9, color='k', ls='--', lw=1)
    # Set limits
    axs[0].set_xlim(Frequency[0]*1e-9, Frequency[-1]*1e-9)
    axs[0].set_ylim(bottom=-0.5)
    axs[2].set_ylim(top=100.5)
    # Annotate optimal frequency on each panel
    for i in range(3):
        lims = axs[i].get_ylim()
        axs[i].text(f_optimal*1e-9, np.mean(lims),
                    f'${f_optimal*1e-9:.4f}$ GHz',
                    ha='center', va='center', rotation=90, size=8,
                    bbox=dict(boxstyle='round', facecolor='white', edgecolor='None'))
    # Format labels
    axs[0].set_ylabel('$P_{|0\\rangle}$ (%)')
    axs[1].set_ylabel('$P_{|1\\rangle}$ (%)')
    axs[2].set_ylabel('$P_{|2\\rangle}$ (%)')
    axs[2].set_xlabel('Frequency (GHz)')
    axs[0].set_title(f'{timestamp}\n{qubit} LRU pulse frequency sweep')
diff --git a/pycqed/analysis_v2/Parity_benchmark_analysis.py b/pycqed/analysis_v2/Parity_benchmark_analysis.py index 6375853378..e0b4b7c461 100644 --- a/pycqed/analysis_v2/Parity_benchmark_analysis.py +++ b/pycqed/analysis_v2/Parity_benchmark_analysis.py @@ -1,15 +1,20 @@ import numpy as np import matplotlib.pyplot as plt import itertools -# import time -# import cvxpy +import time +import cvxpy import copy import pycqed.analysis_v2.disturbancecalc as pb import pycqed.analysis_v2.base_analysis as ba from pycqed.analysis.analysis_toolbox import get_datafilepath_from_timestamp +from pycqed.utilities.general import get_nearest_neighbors import pycqed.measurement.hdf5_data as h5d import os -# from mpl_toolkits.mplot3d import Axes3D +from mpl_toolkits.mplot3d import Axes3D +from scipy import linalg +import matplotlib +from matplotlib.colors import to_rgba +import itertools plt.rcdefaults() #################################### @@ -123,18 +128,18 @@ def compute_metrics(p, e1, e2, n_data_points): D_tvd_double, D_ovd_double, r_double, Disturbances_ovd_double) - class Sandia_parity_benchmark(ba.BaseDataAnalysis): """ Multiplexed readout analysis. Does data binning and creates histograms of data. Threshold is auto determined as the mean of the data. - Used to construct a assignment probability matris. + Used to construct a assignment probability matrix. WARNING: Not sure if post selection supports measurement data in two quadratures. Should use optimal weights if using post-selection. 
def plot_function(P0, P1, P, E1, E2, M1, M2,
                  Disturbances_ovd_single, r_single,
                  Disturbances_ovd_double, r_double,
                  timestamp,
                  ax, **kw):
    """
    Plot the Sandia parity-benchmark summary.

    Args:
        P0, P1: measured distributions over the 16 computational states for
            the |0000> and |1111> reference preparations (dict keyed by state).
        P: calibration distribution; E1, E2: distributions after one and two
            parity checks.
        M1, M2: mean ancilla outcomes (probabilities of outcome 1).
        Disturbances_ovd_single/double: sequences of (value, error) pairs for
            the OVD disturbance measures; r_single/r_double: disturbance rates.
        timestamp: measurement timestamp used in the figure title.
        ax: list of five matplotlib axes.
    """
    fig = ax[0].get_figure()
    # Calibration 0
    ax[0].set_title(r'Reference $|0000\rangle$')
    ax[0].axhline(1., color='black', alpha=.5, linestyle='--')
    ax[0].bar(np.arange(0, 16), [P0[k] for k in P0.keys()], color='C0')
    ax[0].set_ylim(0, 1.05)
    ax[0].set_xticks([0, 5, 10, 15])
    ax[0].set_yticks([0, .5, 1])
    ax[0].set_xticklabels(['{:04b}'.format(5*i) for i in range(4)], rotation=45, fontsize=8)
    ax[0].set_yticklabels([0, 0.5, 1])
    ax[0].set_ylabel('Fraction')
    # Calibration 1
    ax[1].set_title(r'Reference $|1111\rangle$')
    ax[1].axhline(1., color='black', alpha=.5, linestyle='--')
    ax[1].bar(np.arange(0, 16), [P1[k] for k in P1.keys()], color='C0')
    ax[1].set_ylim(0, 1.05)
    ax[1].set_xticks([0, 5, 10, 15])
    ax[1].set_yticks([0, .5, 1])
    ax[1].set_xticklabels(['{:04b}'.format(5*i) for i in range(4)], rotation=45, fontsize=8)
    ax[1].set_yticklabels(['', '', ''])
    # Single parity
    ax[2].set_title('Single parity check')
    ax[2].axhline(.5, color='black', alpha=.5, linestyle='--')
    ax[2].bar(np.arange(0, 16), [P[k] for k in P.keys()], color='C0', alpha=.25, label='calibration')
    ax[2].bar(np.arange(0, 16), [E1[k] for k in E1.keys()], color='C0', label='parity check')
    ax[2].set_ylim(0, .525)
    ax[2].set_yticks([0, .25, .5])
    ax[2].set_xticks(np.arange(0, 16))
    ax[2].set_xticklabels(['{:04b}'.format(i) for i in range(16)], rotation=45, fontsize=8)
    ax[2].set_yticklabels([0, 0.25, 0.5])
    ax[2].set_xlabel('measured state')
    ax[2].set_ylabel('Fraction')
    ax[2].legend(bbox_to_anchor=(1.025, 1), loc='upper left')
    # Repeated parity
    ax[3].set_title('Repeated parity check')
    ax[3].axhline(.5, color='black', alpha=.5, linestyle='--')
    ax[3].bar(np.arange(0, 16), [P[k] for k in P.keys()], color='C0', alpha=.25, label='calibration')
    ax[3].bar(np.arange(0, 16), [E1[k] for k in E1.keys()], color='C1', label='single parity check')
    ax[3].bar(np.arange(0, 16), [E2[k] for k in E2.keys()], color='C0', label='double parity check')
    ax[3].set_ylim(0, .525)
    ax[3].set_yticks([0, .25, .5])
    ax[3].set_xticks(np.arange(0, 16))
    ax[3].set_xticklabels(['{:04b}'.format(i) for i in range(16)], rotation=45, fontsize=8)
    ax[3].set_yticklabels([0, 0.25, 0.5])
    ax[3].set_xlabel('measured state')
    ax[3].set_ylabel('Fraction')
    ax[3].legend(bbox_to_anchor=(1.025, 1), loc='upper left', fontsize=6)
    # Parity outcome results: map outcome probabilities to <m> in [-1, 1].
    ax[4].set_title('Parity results')
    ax[4].axhline(1, color='black', alpha=.5, linestyle='--')
    ax[4].axhline(-1, color='black', alpha=.5, linestyle='--')
    ax[4].bar([1, 2], [1-M1*2, 1-M2*2])
    ax[4].set_ylim(-1.1, 1.1)
    ax[4].set_xticks([1, 2])
    ax[4].set_xticklabels([r'$\langle m_1\rangle$', r'$\langle m_2\rangle$'])
    # Raw strings: these LaTeX templates contain backslash sequences (\p, \D)
    # that are invalid escapes in ordinary string literals.
    textstr1 = '\n'.join(('',
                          r'$D_1^{ovd}$ = %f $\pm$ %f' % (Disturbances_ovd_single[0][0], Disturbances_ovd_single[0][1]),
                          r'$D_2^{ovd}$ = %f $\pm$ %f' % (Disturbances_ovd_single[1][0], Disturbances_ovd_single[1][1]),
                          r'$\Delta^{ovd}$ = %f $\pm$ %f' % (Disturbances_ovd_single[2][0]+Disturbances_ovd_single[3][0], Disturbances_ovd_single[2][1]+Disturbances_ovd_single[3][1]),
                          '$r$ = %f' % (r_single)))

    textstr2 = '\n'.join(('Repeatability = %.1f%%' % ((1-M2)*100),
                          r'$D_1^{ovd}$ = %f $\pm$ %f' % (Disturbances_ovd_double[0][0], Disturbances_ovd_double[0][1]),
                          r'$D_2^{ovd}$ = %f $\pm$ %f' % (Disturbances_ovd_double[1][0], Disturbances_ovd_double[1][1]),
                          r'$\Delta^{ovd}$ = %f $\pm$ %f' % (Disturbances_ovd_double[2][0]+Disturbances_ovd_double[3][0], Disturbances_ovd_double[2][1]+Disturbances_ovd_double[3][1]),
                          '$r$ = %f' % (r_double)))

    props = dict(boxstyle='round', facecolor='gray', alpha=0.15)
    fig.tight_layout()
    ax[4].text(1.08, 1.25, 'Single parity', transform=ax[4].transAxes, fontsize=12,
               verticalalignment='top')
    ax[4].text(1.1, 0.95, textstr1, transform=ax[4].transAxes, fontsize=10,
               verticalalignment='top', bbox=props)

    ax[4].text(2.25, 1.25, 'Repeated parity', transform=ax[4].transAxes, fontsize=12,
               verticalalignment='top')
    ax[4].text(2.27, 0.95, textstr2, transform=ax[4].transAxes, fontsize=10,
               verticalalignment='top', bbox=props)

    fig.suptitle(f'Sandia parity benchmark {timestamp}', y=1.01, x=.43)


def get_expected_value(operator, state, n):
    """
    Return the +/-1 eigenvalue of an n-qubit I/Z Pauli string `operator`
    for the computational basis state `state` (e.g. '0101').
    """
    m = 1
    for i in range(n):
        if operator[i] == 'Z' and state[i] == '1':
            m *= -1
    return m


def gen_M_matrix(n):
    """
    Build the (pseudo-)inverse of the n-qubit readout-calibration matrix that
    maps computational-basis state populations onto I/Z-string expectations.
    """
    # List of different Operators
    ops = ['I', 'Z']
    Operators = [''.join(op) for op in itertools.product(ops, repeat=n)]
    # List of calibration points
    states = ['0', '1']
    Cal_points = [''.join(s) for s in itertools.product(states, repeat=n)]
    # Calculate M matrix: row j holds the eigenvalues of every operator on
    # calibration state j.
    M = np.zeros((2**n, 2**n), dtype=int)
    for j, state in enumerate(Cal_points):
        Betas = np.ones(len(Operators))
        for i in range(2**n):
            Betas[i] = get_expected_value(Operators[i], state, n)
        M[j] = Betas
    M = np.linalg.pinv(M)  # invert matrix
    return M


def get_Beta_matrix(Cal_shots_dig, n):
    """
    Compute readout-correction beta coefficients for every non-identity
    I/Z Pauli string from digitized calibration shots.

    Args:
        Cal_shots_dig: dict qubit -> {state-label -> +/-1 shot array}.
        n: number of qubits.

    Returns:
        dict operator-string -> beta coefficient vector.
    """
    # List of different Operators
    ops = ['I', 'Z']
    Operators = [''.join(op) for op in itertools.product(ops, repeat=n)]
    # List of qubits
    Qubits = list(Cal_shots_dig.keys())
    # Calculate Beta matrix
    H = {}
    B = {}
    M = gen_M_matrix(n)
    for op in Operators[1:]:
        H[op] = np.zeros(2**n)
        for i, state in enumerate(Cal_shots_dig[Qubits[0]].keys()):
            correlator = 1
            # Multiply in each qubit whose position in the operator carries Z.
            for j, qubit in enumerate(Qubits):
                if op[j] == 'Z':
                    correlator *= np.array(Cal_shots_dig[qubit][state])
            H[op][i] = np.mean(correlator)
        B[op] = np.dot(M, H[op])
    return B


def gen_gate_order(n):
    """
    Return the tomography pre-rotation order used in the experiment:
    all n-fold products of Z, X, Y with the qubit order reversed.
    """
    tomo_gates = ['Z', 'X', 'Y']
    Gate_order = [''.join(op)[::-1] for op in itertools.product(tomo_gates, repeat=n)]
    return np.array(Gate_order)


def gen_n_Q_pauli(n):
    """
    Return a dict mapping every n-qubit Pauli string (over I, X, Y, Z)
    to its 2^n x 2^n matrix, built by Kronecker products.
    """
    # Single qubit pauli terms
    Pauli_operators = {}
    Pauli_operators['I'] = np.array([[1, 0],
                                     [0, 1]])
    Pauli_operators['Z'] = np.array([[1, 0],
                                     [0, -1]])
    Pauli_operators['X'] = np.array([[0, 1],
                                     [1, 0]])
    Pauli_operators['Y'] = np.array([[0, -1j],
                                     [1j, 0]])
    # n-qubit pauli terms
    pauli_ops = ['I', 'X', 'Y', 'Z']
    Pauli_terms = {}
    Operators = [''.join(op) for op in itertools.product(pauli_ops, repeat=n)]
    for Op in Operators:
        Pauli_terms[Op] = Pauli_operators[Op[0]]
        for op in Op[1:]:
            Pauli_terms[Op] = np.kron(Pauli_terms[Op], Pauli_operators[op])
    return Pauli_terms
''' + Qubits = list(Tomo_shots_dig.keys())[1:] + n = len(Qubits) + B_matrix = np.array([Beta_matrix[key][1:] for key in Beta_matrix.keys()]) B_0 = np.array([Beta_matrix[key][0] for key in Beta_matrix.keys()]) iB_matrix = np.linalg.inv(B_matrix) - P_values = {op1+op2+op3+op4: [] for op1 in ['I', 'X', 'Y', 'Z'] - for op2 in ['I', 'X', 'Y', 'Z'] - for op3 in ['I', 'X', 'Y', 'Z'] - for op4 in ['I', 'X', 'Y', 'Z']} - + pauli_ops = ['I', 'X', 'Y', 'Z'] + P_values = {''.join(op):[] for op in itertools.product(pauli_ops, repeat=n)} P_frac = copy.deepcopy(P_values) for i, pre_rotation in enumerate(Gate_order[:]): - P_vector = { p1+p2+p3+p4 : 1 for p1 in ['I', pre_rotation[0]] - for p2 in ['I', pre_rotation[1]] - for p3 in ['I', pre_rotation[2]] - for p4 in ['I', pre_rotation[3]]} + combs = [('I', op) for op in pre_rotation ] + P_vector = {''.join(o):1 for o in itertools.product(*combs)} for correlator in P_vector.keys(): # Calculate raw PEVs C = 1 - if correlator[3] != 'I': - C *= np.array(Tomo_meas_D1_dig[i], dtype=float) - if correlator[2] != 'I': - C *= np.array(Tomo_meas_D2_dig[i], dtype=float) - if correlator[1] != 'I': - C *= np.array(Tomo_meas_D3_dig[i], dtype=float) - if correlator[0] != 'I': - C *= np.array(Tomo_meas_D4_dig[i], dtype=float) + for j, qubit in enumerate(Qubits): + if correlator[n-j-1] != 'I': + C *= np.array(Tomo_shots_dig[qubit][i], dtype=float) # Post-select data on stabilizer measurements C = C*Mask[i] n_total = len(C) @@ -410,13 +477,12 @@ def get_Pauli_expectation_values(Beta_matrix, Gate_order, Mask, Tomo_meas_D1_dig for key in P_values: P_values[key] = np.mean(P_values[key]) # Calculate density matrix - Pauli_terms_4 = gen_4Q_pauli() - rho = np.zeros((16,16))*(1+0*1j) - for op in Pauli_terms_4.keys(): - rho += P_values[op]*Pauli_terms_4[op]/16 + Pauli_terms_n = gen_n_Q_pauli(n) + rho = np.zeros((2**n,2**n))*(1+0*1j) + for op in Pauli_terms_n.keys(): + rho += P_values[op]*Pauli_terms_n[op]/2**n return P_values, rho, P_frac -from scipy import 
linalg def fidelity(rho_1, rho_2, trace_conserved = False): if trace_conserved: if np.round(np.trace(rho_1), 3) !=1: @@ -428,9 +494,12 @@ def fidelity(rho_1, rho_2, trace_conserved = False): pos_eig = [vals for vals in eig_vals if vals > 0] return float(np.sum(np.real(np.sqrt(pos_eig))))**2 -class Weight_4_parity_tomography(ba.BaseDataAnalysis): +class Weight_n_parity_tomography(ba.BaseDataAnalysis): def __init__(self, sim_measurement: bool, + n_rounds: int, + post_selection: bool, + exception_qubits: list = [], t_start: str = None, t_stop: str = None, label: str = '', @@ -445,6 +514,9 @@ def __init__(self, extract_only=extract_only) self.sim_measurement = sim_measurement + self.n_rounds = n_rounds + self.post_selection = post_selection + self.exception_qubits = exception_qubits if auto: self.run_analysis() @@ -470,24 +542,29 @@ def extract_data(self): def process_data(self): self.proc_data_dict = {} - Qubits = [ name.decode()[-2:] for name in self.raw_data_dict['value_names'] ] + # Note: .decode() is necessary for old compression version of HDF5 files + Qubits = [(name.decode() if isinstance(name, bytes) else name).split(' ')[-1] for name in self.raw_data_dict['value_names'] ] Data_qubits = [ q for q in Qubits if 'D' in q ] Anc_qubit = [ q for q in Qubits if ('X' in q) or ('Z' in q) ][0] + n = len(Data_qubits) self.Qubits = Qubits ############################ # Sort calibration Shots ############################ Cal_shots = {q : {} for q in Qubits} Cal_shots_dig = {q : {} for q in Data_qubits} - combinations = [s1+s2+s3+s4+s5 for s1 in ['0', '1'] - for s2 in ['0', '1'] - for s3 in ['0', '1'] - for s4 in ['0', '1'] - for s5 in ['0', '1']] + states = ['0','1'] + combinations = [''.join(s) for s in itertools.product(states, repeat=n+1)] + if self.sim_measurement: - cycle = 81 + cycle = 3**n*self.n_rounds else: - cycle = 81*2 + cycle = 3**n*(self.n_rounds+1) + + ## NB: Ps is not yet implemented + if self.post_selection: + cycle*=2 + Thresholds = {} 
self.proc_data_dict['Shots_0'] = {} self.proc_data_dict['Shots_1'] = {} @@ -495,11 +572,14 @@ def process_data(self): Shots_0 = [] Shots_1 = [] for j, comb in enumerate(combinations): - Cal_shots[qubit][comb] = self.raw_data_dict['data'][:,i+1][cycle+j::cycle+32] + Cal_shots[qubit][comb] = self.raw_data_dict['data'][:,i+1][cycle+j::cycle+2**(n+1)] if comb[i] == '0': Shots_0+=list(Cal_shots[qubit][comb]) else: Shots_1+=list(Cal_shots[qubit][comb]) + _s_0, _s_1 = np.mean(Shots_0), np.mean(Shots_1) + if _s_1 < _s_0: + Shots_0, Shots_1 = -np.array(Shots_0), -np.array(Shots_1) Thresholds[qubit] = estimate_threshold(Shots_0, Shots_1) self.proc_data_dict['Shots_0'][qubit] = Shots_0 self.proc_data_dict['Shots_1'][qubit] = Shots_1 @@ -508,48 +588,73 @@ def digitize(shots, threshold): dig_shots = [ +1 if s dz[k] else dz[k]-dz1[k] for k in s ] ax.bar3d(xpos[s], ypos[s], Z, dx, dy, dz=DZ, zsort='min', color=colors, edgecolor=to_rgba('black', .25), linewidth=.4) - ax.set_xticks([ -.5, 4.5, 9.5, 14.5]) - ax.set_yticks([0.25, 5.25,10.25,15.25]) - ax.set_xticklabels(['0000', '0101', '1010', '1111'], rotation=20, fontsize=6, ha='right') - ax.set_yticklabels(['0000', '0101', '1010', '1111'], rotation=-40, fontsize=6) - ax.tick_params(axis='x', which='major', pad=-6) - ax.tick_params(axis='y', which='major', pad=-6) - ax.tick_params(axis='z', which='major', pad=-2) + N = int(np.log2(n)) + states = ['0', '1'] + combs = [''.join(s) for s in itertools.product(states, repeat=N)] + tick_period = n//(3-2*(N%2)) - N%2 + ax.set_xticks(xpos[::n][::tick_period]+1/n/2) + ax.set_yticks(ypos[:n:tick_period]+1/n/2) + ax.set_xticklabels(combs[::tick_period], rotation=20, fontsize=6, ha='right') + ax.set_yticklabels(combs[::tick_period], rotation=-40, fontsize=6) + ax.tick_params(axis='x', which='major', pad=-6, labelsize=6) + ax.tick_params(axis='y', which='major', pad=-6, labelsize=6) + ax.tick_params(axis='z', which='major', pad=-2, labelsize=6) for tick in ax.yaxis.get_majorticklabels(): 
tick.set_horizontalalignment("left") ax.set_zticks(np.linspace(0, .5, 5)) - ax.set_zticklabels(['0', '', '0.25', '', '0.5'], fontsize=6) + ax.set_zticklabels(['0', '', '0.25', '', '0.5']) ax.set_zlim(0, .5) - ax.set_zlabel(r'$|\rho|$', labelpad=-123) + ax.set_zlabel(r'$|\rho|$', labelpad=-8, size=7) ax.set_title(title, size=7) # Text box s = ''.join((r'$F_{|\psi\rangle}='+fr'{Fid*100:.1f}\%$', '\n', - r'$\mathrm{arg}(\rho_{0,15})='+fr'{angle:.1f}^\circ$', '\n', - r'$P_\mathrm{ps}='+fr'{Ps_frac*100:.1f}\%$')) + r'$\mathrm{arg}(\rho_{0,'+f'{n-1}'+'})='+fr'{angle:.1f}^\circ$', '\n', + r'$P_\mathrm{ps}='+fr'{Ps_frac*100:.1f}\%$', '\n', + f'# shots per Pauli {nr_shots}')) props = dict(boxstyle='round', facecolor='white', edgecolor='gray', alpha=1) - ax.text(2, 0, .4, s, size=6, bbox=props) - fig.tight_layout() + ax.text(.5, 1, .5, s, size=5, bbox=props, va='bottom') # colorbar fig.subplots_adjust(bottom=0.1) - cbar_ax = fig.add_axes([0.525, 0.6, 0.01, 0.275]) + cbar_ax = fig.add_axes([0.55, 0.56, 0.01, 0.275]) cmap = matplotlib.colors.LinearSegmentedColormap.from_list("", ["C3",'darkseagreen',"C0",'antiquewhite',"C3"]) sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm) cb = matplotlib.colorbar.ColorbarBase(cbar_ax, cmap=cmap, norm=norm, @@ -733,94 +876,1995 @@ def plot_calibration(qubits, p0, p1, thresholds, ax[i].set_yticks([]) fig.tight_layout() -def plot_function(P0, P1, P, E1, E2, M1, M2, - Disturbances_ovd_single, r_single, - Disturbances_ovd_double, r_double, - timestamp, - ax, **kw): +def plot_repeatabilityfn(M1, M2, repeatability, P_dist, + ancilla, data_qubits, timestamp, + ax, **kw): fig = ax[0].get_figure() - # Calibration 0 - ax[0].set_title(r'Reference $|0000\rangle$') - ax[0].axhline(1., color='black', alpha=.5, linestyle='--') - ax[0].bar(np.arange(0,16), [P0[k] for k in P0.keys()], color='C0') - ax[0].set_ylim(0, 1.05) - ax[0].set_xticks([0,5,10,15]) - ax[0].set_yticks([0, .5, 1]) - ax[0].set_xticklabels(['{:04b}'.format(5*i) for i in range(4)], 
rotation=45, fontsize=8) - ax[0].set_yticklabels([0, 0.5, 1]) - ax[0].set_ylabel('Fraction') - # Calibration 1 - ax[1].set_title(r'Reference $|1111\rangle$') - ax[1].axhline(1., color='black', alpha=.5, linestyle='--') - ax[1].bar(np.arange(0,16), [P1[k] for k in P1.keys()], color='C0') - ax[1].set_ylim(0, 1.05) - ax[1].set_xticks([0,5,10,15]) - ax[1].set_yticks([0, .5, 1]) - ax[1].set_xticklabels(['{:04b}'.format(5*i) for i in range(4)], rotation=45, fontsize=8) - ax[1].set_yticklabels(['', '', '']) - # Single parity - ax[2].set_title('Single parity check') - ax[2].axhline(.5, color='black', alpha=.5, linestyle='--') - ax[2].bar(np.arange(0,16), [P[k] for k in P.keys()], color='C0', alpha=.25, label='calibration') - ax[2].bar(np.arange(0,16), [E1[k] for k in E1.keys()], color='C0', label='parity check') - ax[2].set_ylim(0, .525) - ax[2].set_yticks([0, .25, .5]) - ax[2].set_xticks(np.arange(0,16)) - ax[2].set_xticklabels(['{:04b}'.format(i) for i in range(16)], rotation=45, fontsize=8) - ax[2].set_yticklabels([0, 0.25, 0.5]) - ax[2].set_xlabel('measured state') - ax[2].set_ylabel('Fraction') - ax[2].legend(bbox_to_anchor=(1.025, 1), loc='upper left') - # Repeated parity - ax[3].set_title('Repeated parity check') - ax[3].axhline(.5, color='black', alpha=.5, linestyle='--') - ax[3].bar(np.arange(0,16), [P[k] for k in P.keys()], color='C0', alpha=.25, label='calibration') - ax[3].bar(np.arange(0,16), [E1[k] for k in E1.keys()], color='C1', label='single parity check') - ax[3].bar(np.arange(0,16), [E2[k] for k in E2.keys()], color='C0', label='double parity check') - ax[3].set_ylim(0, .525) - ax[3].set_yticks([0, .25, .5]) - ax[3].set_xticks(np.arange(0,16)) - ax[3].set_xticklabels(['{:04b}'.format(i) for i in range(16)], rotation=45, fontsize=8) - ax[3].set_yticklabels([0, 0.25, 0.5]) - ax[3].set_xlabel('measured state') - ax[3].set_ylabel('Fraction') - ax[3].legend(bbox_to_anchor=(1.025, 1), loc='upper left', fontsize=6) - # Parity outcome results - 
ax[4].set_title('Parity results') - ax[4].axhline(1, color='black', alpha=.5, linestyle='--') - ax[4].axhline(-1, color='black', alpha=.5, linestyle='--') - ax[4].bar([1, 2], [M1*2-1, M2*2-1]) - ax[4].set_ylim(-1.1, 1.1) - ax[4].set_xticks([1,2]) - ax[4].set_xticklabels([r'$\langle m_1\rangle$', r'$\langle m_2\rangle$']) - textstr1 = '\n'.join(('', - '$D_1^{ovd}$ = %f $\pm$ %f' % (Disturbances_ovd_single[0][0], Disturbances_ovd_single[0][1]), - '$D_2^{ovd}$ = %f $\pm$ %f' % (Disturbances_ovd_single[1][0], Disturbances_ovd_single[1][1]), - '$\Delta^{ovd}$ = %f $\pm$ %f' % (Disturbances_ovd_single[2][0]+Disturbances_ovd_single[3][0], Disturbances_ovd_single[2][1]+Disturbances_ovd_single[3][1]), - '$r$ = %f' % (r_single))) + ax[0].bar([r'$\langle M_1\rangle$', r'$\langle M_2\rangle$'], [M1, M2]) + ax[0].set_ylim(-1.05, 1.05) + ax[0].set_yticks([-1, -.5, 0, .5, 1]) + ax[0].set_yticklabels(['-1', '', '0', '', '1']) + ax[0].set_title(f'{ancilla} measurement results') + ax[0].text(-.4, -.9, f'Repeatability : {repeatability*100:.1f}%') + + states = ['0', '1'] + n = len(data_qubits) + combs = np.array([''.join(s) for s in itertools.product(states, repeat=n)]) + idx_sort = np.argsort([ s.count('1') for s in combs ]) + ax[1].bar(combs[idx_sort], P_dist[idx_sort]) + ax[1].set_xticklabels(combs[idx_sort], rotation=90) + ax[1].set_title(f'{" ".join(data_qubits[::-1])} measurement probability') + fig.suptitle(f'{timestamp}\nRepeatability measurement {ancilla}', y=1.2) + + +def _calculate_fid_and_threshold(x0, n0, x1, n1): + """ + Calculate fidelity and threshold from histogram data: + x0, n0 is the histogram data of shots 0 (value and occurences), + x1, n1 is the histogram data of shots 1 (value and occurences). + """ + # Build cumulative histograms of shots 0 + # and 1 in common bins by interpolation. 
+ all_x = np.unique(np.sort(np.concatenate((x0, x1)))) + cumsum0, cumsum1 = np.cumsum(n0), np.cumsum(n1) + ecumsum0 = np.interp(x=all_x, xp=x0, fp=cumsum0, left=0) + necumsum0 = ecumsum0/np.max(ecumsum0) + ecumsum1 = np.interp(x=all_x, xp=x1, fp=cumsum1, left=0) + necumsum1 = ecumsum1/np.max(ecumsum1) + # Calculate optimal threshold and fidelity + F_vs_th = (1-(1-abs(necumsum0 - necumsum1))/2) + opt_idxs = np.argwhere(F_vs_th == np.amax(F_vs_th)) + opt_idx = int(round(np.average(opt_idxs))) + F_assignment_raw = F_vs_th[opt_idx] + threshold_raw = all_x[opt_idx] + return F_assignment_raw, threshold_raw + +def _get_threshold(Shots_0, Shots_1): + # Take relavant quadrature + shots_0 = Shots_0[:,0] + shots_1 = Shots_1[:,0] + # find range + _all_shots = np.concatenate((shots_0, shots_1)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x0, n0 = np.unique(shots_0, return_counts=True) + x1, n1 = np.unique(shots_1, return_counts=True) + Fid, threshold = _calculate_fid_and_threshold(x0, n0, x1, n1) + return threshold + +def _gauss_pdf(x, x0, sigma): + return np.exp(-((x-x0)/sigma)**2/2) + +def double_gauss(x, x0, x1, sigma0, sigma1, A, r): + _dist0 = A*( (1-r)*_gauss_pdf(x, x0, sigma0) + r*_gauss_pdf(x, x1, sigma1) ) + return _dist0 + +def _double_gauss_joint(x, x0, x1, sigma0, sigma1, A0, A1, r0, r1): + _dist0 = double_gauss(x, x0, x1, sigma0, sigma1, A0, r0) + _dist1 = double_gauss(x, x1, x0, sigma1, sigma0, A1, r1) + return np.concatenate((_dist0, _dist1)) + +def _fit_double_gauss(x_vals, hist_0, hist_1): + ''' + Fit two histograms to a double gaussian with + common parameters. From fitted parameters, + calculate SNR, Pe0, Pg1, Teff, Ffit and Fdiscr. 
+ ''' + from scipy.optimize import curve_fit + # Double gaussian model for fitting + def _gauss_pdf(x, x0, sigma): + return np.exp(-((x-x0)/sigma)**2/2) + global double_gauss + def double_gauss(x, x0, x1, sigma0, sigma1, A, r): + _dist0 = A*( (1-r)*_gauss_pdf(x, x0, sigma0) + r*_gauss_pdf(x, x1, sigma1) ) + return _dist0 + # helper function to simultaneously fit both histograms with common parameters + def _double_gauss_joint(x, x0, x1, sigma0, sigma1, A0, A1, r0, r1): + _dist0 = double_gauss(x, x0, x1, sigma0, sigma1, A0, r0) + _dist1 = double_gauss(x, x1, x0, sigma1, sigma0, A1, r1) + return np.concatenate((_dist0, _dist1)) + # Guess for fit + pdf_0 = hist_0/np.sum(hist_0) # Get prob. distribution + pdf_1 = hist_1/np.sum(hist_1) # + _x0_guess = np.sum(x_vals*pdf_0) # calculate mean + _x1_guess = np.sum(x_vals*pdf_1) # + _sigma0_guess = np.sqrt(np.sum((x_vals-_x0_guess)**2*pdf_0)) # calculate std + _sigma1_guess = np.sqrt(np.sum((x_vals-_x1_guess)**2*pdf_1)) # + _r0_guess = 0.01 + _r1_guess = 0.05 + _A0_guess = np.max(hist_0) + _A1_guess = np.max(hist_1) + p0 = [_x0_guess, _x1_guess, _sigma0_guess, _sigma1_guess, _A0_guess, _A1_guess, _r0_guess, _r1_guess] + # Bounding parameters + _x0_bound = (-np.inf,np.inf) + _x1_bound = (-np.inf,np.inf) + _sigma0_bound = (0,np.inf) + _sigma1_bound = (0,np.inf) + _r0_bound = (0,1) + _r1_bound = (0,1) + _A0_bound = (0,np.inf) + _A1_bound = (0,np.inf) + bounds = np.array([_x0_bound, _x1_bound, _sigma0_bound, _sigma1_bound, + _A0_bound, _A1_bound, _r0_bound, _r1_bound]) + # Fit parameters within bounds + popt, pcov = curve_fit( + _double_gauss_joint, x_vals, + np.concatenate((hist_0, hist_1)), + p0=p0, bounds=bounds.transpose()) + popt0 = popt[[0,1,2,3,4,6]] + popt1 = popt[[1,0,3,2,5,7]] + # Calculate quantities of interest + SNR = abs(popt0[0] - popt1[0])/((abs(popt0[2])+abs(popt1[2]))/2) + P_e0 = popt0[5] + P_g1 = popt1[5] + # Fidelity from fit + _range = (np.min(x_vals), np.max(x_vals)) + _x_data = np.linspace(*_range, 10001) + 
_h0 = double_gauss(_x_data, *popt0)# compute distrubition from + _h1 = double_gauss(_x_data, *popt1)# fitted parameters. + Fid_fit, threshold_fit = _calculate_fid_and_threshold(_x_data, _h0, _x_data, _h1) + # Discrimination fidelity + _h0 = double_gauss(_x_data, *popt0[:-1], 0)# compute distrubition without residual + _h1 = double_gauss(_x_data, *popt1[:-1], 0)# excitation of relaxation. + Fid_discr, threshold_discr = _calculate_fid_and_threshold(_x_data, _h0, _x_data, _h1) + # return results + qoi = { 'SNR': SNR, + 'P_e0': P_e0, 'P_g1': P_g1, + 'Fid_fit': Fid_fit, 'Fid_discr': Fid_discr } + return popt0, popt1, qoi + +def _decision_boundary_points(coefs, intercepts): + ''' + Find points along the decision boundaries of + LinearDiscriminantAnalysis (LDA). + This is performed by finding the interception + of the bounds of LDA. For LDA, these bounds are + encoded in the coef_ and intercept_ parameters + of the classifier. + Each bound is given by the equation: + y + coef_i[0]/coef_i[1]*x + intercept_i = 0 + Note this only works for LinearDiscriminantAnalysis. + Other classifiers might have diferent bound models. + ''' + points = {} + # Cycle through model coeficients + # and intercepts. 
+ n = len(intercepts) + if n == 3: + _bounds = [[0,1], [1,2], [0,2]] + if n == 4: + _bounds = [[0,1], [1,2], [2,3], [0,3]] + for i, j in _bounds: + c_i = coefs[i] + int_i = intercepts[i] + c_j = coefs[j] + int_j = intercepts[j] + x = (- int_j/c_j[1] + int_i/c_i[1])/(-c_i[0]/c_i[1] + c_j[0]/c_j[1]) + y = -c_i[0]/c_i[1]*x - int_i/c_i[1] + points[f'{i}{j}'] = (x, y) + # Find mean point + points['mean'] = np.mean([ [x, y] for (x, y) in points.values()], axis=0) + return points + +def _rotate_and_center_data(I, Q, vec0, vec1, phi=0): + vector = vec1-vec0 + angle = np.arctan(vector[1]/vector[0]) + rot_matrix = np.array([[ np.cos(-angle+phi),-np.sin(-angle+phi)], + [ np.sin(-angle+phi), np.cos(-angle+phi)]]) + proc = np.array((I, Q)) + proc = np.dot(rot_matrix, proc) + return proc.transpose() + +def _calculate_defect_rate(Shots, n_rounds, with_reset=False): + ''' + Shots must be a dictionary with format: + |<---nr_shots--->| + Shots['round '] = np.array([0/1,......., 0/1]) + ''' + Deffect_rate = {} + nr_shots = len(Shots['round 1']) + if not with_reset: + # M array is measured data + # P array is parity data + # D array is defect data + M_values = np.ones((nr_shots, n_rounds)) + for r in range(n_rounds): + # Convert to +1 and -1 values + M_values[:,r] *= 1-2*(Shots[f'round {r+1}']) + + P_values = np.hstack( (np.ones((nr_shots, 2)), M_values) ) + P_values = P_values[:,1:] * P_values[:,:-1] + else: + # P array is parity data + # D array is defect data + P_values = np.ones((nr_shots, n_rounds)) + for r in range(n_rounds): + # Convert to +1 and -1 values + P_values[:,r] *= 1-2*(Shots[f'round {r+1}']) + D_values = P_values[:,1:] * P_values[:,:-1] + Deffect_rate = [ np.nanmean(1-D_values[:,i])/2 for i in range(n_rounds)] + return Deffect_rate + +# Not used??? 
+class Repeated_stabilizer_measurement_analysis(ba.BaseDataAnalysis): + def __init__(self, + qubit: str, + n_rounds: int, + t_start: str = None, + t_stop: str = None, + label: str = '', + options_dict: dict = None, + extract_only: bool = False, + auto=True + ): - textstr2 = '\n'.join(('Repeatability = %.1f%%' % (M2*100), - '$D_1^{ovd}$ = %f $\pm$ %f' % (Disturbances_ovd_double[0][0], Disturbances_ovd_double[0][1]), - '$D_2^{ovd}$ = %f $\pm$ %f' % (Disturbances_ovd_double[1][0], Disturbances_ovd_double[1][1]), - '$\Delta^{ovd}$ = %f $\pm$ %f' % (Disturbances_ovd_double[2][0]+Disturbances_ovd_double[3][0], Disturbances_ovd_double[2][1]+Disturbances_ovd_double[3][1]), - '$r$ = %f' % (r_double))) + super().__init__(t_start=t_start, t_stop=t_stop, + label=label, + options_dict=options_dict, + extract_only=extract_only) + self.qubit = qubit + self.n_rounds = n_rounds - props = dict(boxstyle='round', facecolor='gray', alpha=0.15) + if auto: + self.run_analysis() + + def extract_data(self): + """ + This is a new style (sept 2019) data extraction. + This could at some point move to a higher level class. 
+ """ + self.get_timestamps() + self.timestamp = self.timestamps[0] + + data_fp = get_datafilepath_from_timestamp(self.timestamp) + param_spec = {'data': ('Experimental Data/Data', 'dset'), + 'value_names': ('Experimental Data', 'attr:value_names')} + self.raw_data_dict = h5d.extract_pars_from_datafile( + data_fp, param_spec) + + # Parts added to be compatible with base analysis data requirements + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] + + def process_data(self): + ###################################### + # Sort shots and assign them + ###################################### + n_rounds = self.n_rounds + _cycle = n_rounds*4 + 3 + + _raw_shots = self.raw_data_dict['data'][:,1:]# remove shot number + _shots_0 = _raw_shots[4*n_rounds+0::_cycle] + _shots_1 = _raw_shots[4*n_rounds+1::_cycle] + _shots_2 = _raw_shots[4*n_rounds+2::_cycle] + # Rotate data + center_0 = np.array([np.mean(_shots_0[:,0]), np.mean(_shots_0[:,1])]) + center_1 = np.array([np.mean(_shots_1[:,0]), np.mean(_shots_1[:,1])]) + center_2 = np.array([np.mean(_shots_2[:,0]), np.mean(_shots_2[:,1])]) + raw_shots = _rotate_and_center_data(_raw_shots[:,0], _raw_shots[:,1], center_0, center_1) + Shots_0 = raw_shots[4*n_rounds+0::_cycle] + Shots_1 = raw_shots[4*n_rounds+1::_cycle] + Shots_2 = raw_shots[4*n_rounds+2::_cycle] + self.proc_data_dict['Shots_0'] = Shots_0 + self.proc_data_dict['Shots_1'] = Shots_1 + self.proc_data_dict['Shots_2'] = Shots_2 + # Use classifier for data + data = np.concatenate((Shots_0, Shots_1, Shots_2)) + labels = [0 for s in Shots_0]+[1 for s in Shots_1]+[2 for s in Shots_2] + from sklearn.discriminant_analysis import LinearDiscriminantAnalysis + clf = LinearDiscriminantAnalysis() + clf.fit(data, labels) + dec_bounds = _decision_boundary_points(clf.coef_, clf.intercept_) + Fid_dict = {} + for state, shots in zip([ '0', '1', '2'], + [Shots_0, Shots_1, Shots_2]): + _res = clf.predict(shots) + _fid = np.mean(_res == 
int(state)) + Fid_dict[state] = _fid + Fid_dict['avg'] = np.mean([f for f in Fid_dict.values()]) + # Get assignment fidelity matrix + M = np.zeros((3,3)) + for i, shots in enumerate([Shots_0, Shots_1, Shots_2]): + for j, state in enumerate(['0', '1', '2']): + _res = clf.predict(shots) + M[i][j] = np.mean(_res == int(state)) + self.proc_data_dict['dec_bounds'] = dec_bounds + self.proc_data_dict['classifier'] = clf + self.proc_data_dict['Fid_dict'] = Fid_dict + self.proc_data_dict['Assignment_matrix'] = M + ######################################### + # Project data along axis perpendicular + # to the decision boundaries. + ######################################### + ############################ + # Projection along 01 axis. + ############################ + # Rotate shots over 01 axis + shots_0 = _rotate_and_center_data(Shots_0[:,0],Shots_0[:,1],dec_bounds['mean'],dec_bounds['01'],phi=np.pi/2) + shots_1 = _rotate_and_center_data(Shots_1[:,0],Shots_1[:,1],dec_bounds['mean'],dec_bounds['01'],phi=np.pi/2) + # Take relavant quadrature + shots_0 = shots_0[:,0] + shots_1 = shots_1[:,0] + n_shots_1 = len(shots_1) + # find range + _all_shots = np.concatenate((shots_0, shots_1)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x0, n0 = np.unique(shots_0, return_counts=True) + x1, n1 = np.unique(shots_1, return_counts=True) + Fid_01, threshold_01 = _calculate_fid_and_threshold(x0, n0, x1, n1) + # Histogram of shots for 1 and 2 + h0, bin_edges = np.histogram(shots_0, bins=100, range=_range) + h1, bin_edges = np.histogram(shots_1, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt0, popt1, params_01 = _fit_double_gauss(bin_centers, h0, h1) + # Save processed data + self.proc_data_dict['projection_01'] = {} + self.proc_data_dict['projection_01']['h0'] = h0 + self.proc_data_dict['projection_01']['h1'] = h1 + self.proc_data_dict['projection_01']['bin_centers'] = bin_centers + 
self.proc_data_dict['projection_01']['popt0'] = popt0 + self.proc_data_dict['projection_01']['popt1'] = popt1 + self.proc_data_dict['projection_01']['SNR'] = params_01['SNR'] + self.proc_data_dict['projection_01']['Fid'] = Fid_01 + self.proc_data_dict['projection_01']['threshold'] = threshold_01 + ############################ + # Projection along 12 axis. + ############################ + # Rotate shots over 12 axis + shots_1 = _rotate_and_center_data(Shots_1[:,0],Shots_1[:,1],dec_bounds['mean'], dec_bounds['12'], phi=np.pi/2) + shots_2 = _rotate_and_center_data(Shots_2[:,0],Shots_2[:,1],dec_bounds['mean'], dec_bounds['12'], phi=np.pi/2) + # Take relavant quadrature + shots_1 = shots_1[:,0] + shots_2 = shots_2[:,0] + n_shots_2 = len(shots_2) + # find range + _all_shots = np.concatenate((shots_1, shots_2)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x1, n1 = np.unique(shots_1, return_counts=True) + x2, n2 = np.unique(shots_2, return_counts=True) + Fid_12, threshold_12 = _calculate_fid_and_threshold(x1, n1, x2, n2) + # Histogram of shots for 1 and 2 + h1, bin_edges = np.histogram(shots_1, bins=100, range=_range) + h2, bin_edges = np.histogram(shots_2, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt1, popt2, params_12 = _fit_double_gauss(bin_centers, h1, h2) + # Save processed data + self.proc_data_dict['projection_12'] = {} + self.proc_data_dict['projection_12']['h1'] = h1 + self.proc_data_dict['projection_12']['h2'] = h2 + self.proc_data_dict['projection_12']['bin_centers'] = bin_centers + self.proc_data_dict['projection_12']['popt1'] = popt1 + self.proc_data_dict['projection_12']['popt2'] = popt2 + self.proc_data_dict['projection_12']['SNR'] = params_12['SNR'] + self.proc_data_dict['projection_12']['Fid'] = Fid_12 + self.proc_data_dict['projection_12']['threshold'] = threshold_12 + ############################ + # Projection along 02 axis. 
+ ############################ + # Rotate shots over 02 axis + shots_0 = _rotate_and_center_data(Shots_0[:,0],Shots_0[:,1],dec_bounds['mean'],dec_bounds['02'], phi=np.pi/2) + shots_2 = _rotate_and_center_data(Shots_2[:,0],Shots_2[:,1],dec_bounds['mean'],dec_bounds['02'], phi=np.pi/2) + # Take relavant quadrature + shots_0 = shots_0[:,0] + shots_2 = shots_2[:,0] + n_shots_2 = len(shots_2) + # find range + _all_shots = np.concatenate((shots_0, shots_2)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x0, n0 = np.unique(shots_0, return_counts=True) + x2, n2 = np.unique(shots_2, return_counts=True) + Fid_02, threshold_02 = _calculate_fid_and_threshold(x0, n0, x2, n2) + # Histogram of shots for 1 and 2 + h0, bin_edges = np.histogram(shots_0, bins=100, range=_range) + h2, bin_edges = np.histogram(shots_2, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt0, popt2, params_02 = _fit_double_gauss(bin_centers, h0, h2) + # Save processed data + self.proc_data_dict['projection_02'] = {} + self.proc_data_dict['projection_02']['h0'] = h0 + self.proc_data_dict['projection_02']['h2'] = h2 + self.proc_data_dict['projection_02']['bin_centers'] = bin_centers + self.proc_data_dict['projection_02']['popt0'] = popt0 + self.proc_data_dict['projection_02']['popt2'] = popt2 + self.proc_data_dict['projection_02']['SNR'] = params_02['SNR'] + self.proc_data_dict['projection_02']['Fid'] = Fid_02 + self.proc_data_dict['projection_02']['threshold'] = threshold_02 + ######################################## + # Analyze experiment shots, post-select + # on leakage and calculate defect rate + ######################################## + # Sort experimental shots + # 0-Normal experiment + # 1-LRU on data experiment + # 2-LRU on ancilla experiment + # 3-LRU on data and ancilla experiment + shots_exp_0 = {} + Shots_qubit_0 = {} + Shots_qutrit_0 = {} + shots_exp_1 = {} + Shots_qubit_1 = {} + Shots_qutrit_1 = {} + shots_exp_2 = {} + 
Shots_qubit_2 = {} + Shots_qutrit_2 = {} + shots_exp_3 = {} + Shots_qubit_3 = {} + Shots_qutrit_3 = {} + _zero_lvl = np.mean(self.proc_data_dict['Shots_0'][:,0]) + _one_lvl = np.mean(self.proc_data_dict['Shots_1'][:,1]) + threshold = self.proc_data_dict['projection_01']['threshold'] + for r in range(n_rounds): + # Note we are using the rotated shots already + shots_exp_0[f'round {r+1}'] = raw_shots[r+0*n_rounds::_cycle] + shots_exp_1[f'round {r+1}'] = raw_shots[r+1*n_rounds::_cycle] + shots_exp_2[f'round {r+1}'] = raw_shots[r+2*n_rounds::_cycle] + shots_exp_3[f'round {r+1}'] = raw_shots[r+3*n_rounds::_cycle] + # Perform Qubit assignment + if _zero_lvl < threshold: # zero level is left of threshold + Shots_qubit_0[f'round {r+1}'] = np.array([0 if sthreshold else 1 for s in shots_exp_0[f'round {r+1}'][:,0]]) + Shots_qubit_1[f'round {r+1}'] = np.array([0 if s>threshold else 1 for s in shots_exp_1[f'round {r+1}'][:,0]]) + Shots_qubit_2[f'round {r+1}'] = np.array([0 if s>threshold else 1 for s in shots_exp_2[f'round {r+1}'][:,0]]) + Shots_qubit_3[f'round {r+1}'] = np.array([0 if s>threshold else 1 for s in shots_exp_3[f'round {r+1}'][:,0]]) + # Perform Qutrit assignment + Shots_qutrit_0[f'round {r+1}'] = clf.predict(shots_exp_0[f'round {r+1}']) + Shots_qutrit_1[f'round {r+1}'] = clf.predict(shots_exp_1[f'round {r+1}']) + Shots_qutrit_2[f'round {r+1}'] = clf.predict(shots_exp_2[f'round {r+1}']) + Shots_qutrit_3[f'round {r+1}'] = clf.predict(shots_exp_3[f'round {r+1}']) + # Calculate leakage in ancilla: + Population_0 = {} + Population_1 = {} + Population_2 = {} + Population_3 = {} + def _get_pop_vector(Shots): + p0 = np.mean(Shots==0) + p1 = np.mean(Shots==1) + p2 = np.mean(Shots==2) + return np.array([p0, p1, p2]) + M_inv = np.linalg.inv(M) + for r in range(n_rounds): + _pop_vec_0 = _get_pop_vector(Shots_qutrit_0[f'round {r+1}']) + _pop_vec_1 = _get_pop_vector(Shots_qutrit_1[f'round {r+1}']) + _pop_vec_2 = _get_pop_vector(Shots_qutrit_2[f'round {r+1}']) + _pop_vec_3 = 
_get_pop_vector(Shots_qutrit_3[f'round {r+1}']) + Population_0[f'round {r+1}'] = np.dot(_pop_vec_0, M_inv) + Population_1[f'round {r+1}'] = np.dot(_pop_vec_1, M_inv) + Population_2[f'round {r+1}'] = np.dot(_pop_vec_2, M_inv) + Population_3[f'round {r+1}'] = np.dot(_pop_vec_3, M_inv) + Population_0 = np.array([Population_0[k][2] for k in Population_0.keys()]) + Population_1 = np.array([Population_1[k][2] for k in Population_1.keys()]) + Population_2 = np.array([Population_2[k][2] for k in Population_2.keys()]) + Population_3 = np.array([Population_3[k][2] for k in Population_3.keys()]) + # Fit leakage and seepage rates + from scipy.optimize import curve_fit + def _func(n, L, S): + return (1 - np.exp(-n*(S+L)))*L/(S+L) + _x = np.arange(0, self.n_rounds)+1 + popt_0, pcov_0 = curve_fit(_func, _x, Population_0) + popt_1, pcov_1 = curve_fit(_func, _x, Population_1) + popt_2, pcov_2 = curve_fit(_func, _x, Population_2) + popt_3, pcov_3 = curve_fit(_func, _x, Population_3) + self.proc_data_dict['Population_0'] = Population_0 + self.proc_data_dict['Population_1'] = Population_1 + self.proc_data_dict['Population_2'] = Population_2 + self.proc_data_dict['Population_3'] = Population_3 + # Perform post-selection on Qutrit readout + Shots_qutrit_ps_0 = {} + Shots_qutrit_ps_1 = {} + Shots_qutrit_ps_2 = {} + Shots_qutrit_ps_3 = {} + nr_shots = len(Shots_qutrit_0['round 1']) + _mask_0 = np.ones(nr_shots) + _mask_1 = np.ones(nr_shots) + _mask_2 = np.ones(nr_shots) + _mask_3 = np.ones(nr_shots) + Ps_fraction_0 = np.ones(n_rounds) + Ps_fraction_1 = np.ones(n_rounds) + Ps_fraction_2 = np.ones(n_rounds) + Ps_fraction_3 = np.ones(n_rounds) + # get post selection mask and ps fraction for each round + for r in range(n_rounds): + _mask_0 *= np.array([1 if s != 2 else np.nan for s in Shots_qutrit_0[f'round {r+1}']]) + _mask_1 *= np.array([1 if s != 2 else np.nan for s in Shots_qutrit_1[f'round {r+1}']]) + _mask_2 *= np.array([1 if s != 2 else np.nan for s in Shots_qutrit_2[f'round {r+1}']]) 
+ _mask_3 *= np.array([1 if s != 2 else np.nan for s in Shots_qutrit_3[f'round {r+1}']]) + Ps_fraction_0[r] = np.nansum(_mask_0)/nr_shots + Ps_fraction_1[r] = np.nansum(_mask_1)/nr_shots + Ps_fraction_2[r] = np.nansum(_mask_2)/nr_shots + Ps_fraction_3[r] = np.nansum(_mask_3)/nr_shots + # remove leakage detection events + for r in range(n_rounds): + Shots_qutrit_ps_0[f'round {r+1}'] = Shots_qutrit_0[f'round {r+1}'][~np.isnan(_mask_0)] + Shots_qutrit_ps_1[f'round {r+1}'] = Shots_qutrit_1[f'round {r+1}'][~np.isnan(_mask_1)] + Shots_qutrit_ps_2[f'round {r+1}'] = Shots_qutrit_2[f'round {r+1}'][~np.isnan(_mask_2)] + Shots_qutrit_ps_3[f'round {r+1}'] = Shots_qutrit_3[f'round {r+1}'][~np.isnan(_mask_3)] + ########################### + # Calculate defect rate + ########################### + print(f'Post-selected fraction normal: {Ps_fraction_0[-1]*100:.5f} %') + print(f'Post-selected fraction LRU data: {Ps_fraction_1[-1]*100:.5f} %') + print(f'Post-selected fraction LRU ancilla: {Ps_fraction_2[-1]*100:.5f} %') + print(f'Post-selected fraction LRU both: {Ps_fraction_3[-1]*100:.5f} %') + defect_rate_0 = _calculate_defect_rate(Shots_qubit_0, n_rounds) + defect_rate_0_ps = _calculate_defect_rate(Shots_qutrit_ps_0, n_rounds) + defect_rate_1 = _calculate_defect_rate(Shots_qubit_1, n_rounds) + defect_rate_1_ps = _calculate_defect_rate(Shots_qutrit_ps_1, n_rounds) + defect_rate_2 = _calculate_defect_rate(Shots_qubit_2, n_rounds) + defect_rate_2_ps = _calculate_defect_rate(Shots_qutrit_ps_2, n_rounds) + defect_rate_3 = _calculate_defect_rate(Shots_qubit_3, n_rounds) + defect_rate_3_ps = _calculate_defect_rate(Shots_qutrit_ps_3, n_rounds) + self.qoi = {} + self.qoi['defect_rate_normal'] = defect_rate_0 + self.qoi['defect_rate_LRU_data'] = defect_rate_1 + self.qoi['defect_rate_LRU_ancilla'] = defect_rate_2 + self.qoi['defect_rate_LRU_data_ancilla'] = defect_rate_3 + self.qoi['defect_rate_normal_ps'] = defect_rate_0_ps + self.qoi['defect_rate_LRU_data_ps'] = defect_rate_1_ps + 
self.qoi['defect_rate_LRU_ancilla_ps'] = defect_rate_2_ps + self.qoi['defect_rate_LRU_data_ancilla_ps'] = defect_rate_3_ps + self.qoi['Ps_fraction_normal'] = Ps_fraction_0 + self.qoi['Ps_fraction_LRU_data'] = Ps_fraction_1 + self.qoi['Ps_fraction_LRU_ancilla'] = Ps_fraction_2 + self.qoi['Ps_fraction_LRU_data_ancilla'] = Ps_fraction_3 + self.qoi['Population_normal'] = Population_0 + self.qoi['Population_LRU_data'] = Population_1 + self.qoi['Population_LRU_ancilla'] = Population_2 + self.qoi['Population_LRU_data_ancilla'] = Population_3 + + def prepare_plots(self): + self.axs_dict = {} + fig = plt.figure(figsize=(8,4), dpi=100) + axs = [fig.add_subplot(121), + fig.add_subplot(322), + fig.add_subplot(324), + fig.add_subplot(326)] + # fig.patch.set_alpha(0) + self.axs_dict['IQ_readout_histogram'] = axs[0] + self.figs['IQ_readout_histogram'] = fig + self.plot_dicts['IQ_readout_histogram'] = { + 'plotfn': ssro_IQ_projection_plotfn, + 'ax_id': 'IQ_readout_histogram', + 'shots_0': self.proc_data_dict['Shots_0'], + 'shots_1': self.proc_data_dict['Shots_1'], + 'shots_2': self.proc_data_dict['Shots_2'], + 'projection_01': self.proc_data_dict['projection_01'], + 'projection_12': self.proc_data_dict['projection_12'], + 'projection_02': self.proc_data_dict['projection_02'], + 'classifier': self.proc_data_dict['classifier'], + 'dec_bounds': self.proc_data_dict['dec_bounds'], + 'Fid_dict': self.proc_data_dict['Fid_dict'], + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + + fig, axs = plt.subplots(figsize=(6*1.5,2.5*1.5), ncols=2, dpi=100) + self.axs_dict['Deffect_rate_plot'] = axs[0] + self.figs['Deffect_rate_plot'] = fig + self.plot_dicts['Deffect_rate_plot'] = { + 'plotfn': defect_rate_plotfn, + 'ax_id': 'Deffect_rate_plot', + 'n_rounds': self.n_rounds, + 'defect_rate_0': self.qoi['defect_rate_normal'], + 'defect_rate_0_ps': self.qoi['defect_rate_normal_ps'], + 'defect_rate_1': self.qoi['defect_rate_LRU_data'], + 'defect_rate_1_ps': self.qoi['defect_rate_LRU_data_ps'], 
+ 'defect_rate_2': self.qoi['defect_rate_LRU_ancilla'], + 'defect_rate_2_ps': self.qoi['defect_rate_LRU_ancilla_ps'], + 'defect_rate_3': self.qoi['defect_rate_LRU_data_ancilla'], + 'defect_rate_3_ps': self.qoi['defect_rate_LRU_data_ancilla_ps'], + 'ps_0': self.qoi['Ps_fraction_normal'], + 'ps_1': self.qoi['Ps_fraction_LRU_data'], + 'ps_2': self.qoi['Ps_fraction_LRU_ancilla'], + 'ps_3': self.qoi['Ps_fraction_LRU_data_ancilla'], + 'p_0': self.qoi['Population_normal'], + 'p_1': self.qoi['Population_LRU_data'], + 'p_2': self.qoi['Population_LRU_ancilla'], + 'p_3': self.qoi['Population_LRU_data_ancilla'], + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', False): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + tag_tstamp=self.options_dict.get('tag_tstamp', True)) + +def ssro_IQ_projection_plotfn( + shots_0, + shots_1, + shots_2, + shots_3, + projection_01, + projection_12, + projection_03, + projection_23, + classifier, + dec_bounds, + Fid_dict, + timestamp, + qubit, + ax, **kw): + fig = ax.get_figure() + axs = fig.get_axes() + # Fit 2D gaussians + from scipy.optimize import curve_fit + def twoD_Gaussian(data, amplitude, x0, y0, sigma_x, sigma_y, theta): + x, y = data + x0 = float(x0) + y0 = float(y0) + a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2) + b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2) + c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2) + g = amplitude*np.exp( - (a*((x-x0)**2) + 2*b*(x-x0)*(y-y0) + + c*((y-y0)**2))) + return g.ravel() + def _fit_2D_gaussian(X, Y): + counts, _x, _y = np.histogram2d(X, Y, bins=[100, 100], density=True) + x = (_x[:-1] + _x[1:]) / 2 + y = (_y[:-1] + _y[1:]) / 2 + _x, _y = np.meshgrid(_x, _y) + x, y = np.meshgrid(x, y) + p0 = 
[counts.max(), np.mean(X), np.mean(Y), np.std(X), np.std(Y), 0] + popt, pcov = curve_fit(twoD_Gaussian, (x, y), counts.T.ravel(), p0=p0) + return popt + popt_0 = _fit_2D_gaussian(shots_0[:,0], shots_0[:,1]) + popt_1 = _fit_2D_gaussian(shots_1[:,0], shots_1[:,1]) + popt_2 = _fit_2D_gaussian(shots_2[:,0], shots_2[:,1]) + popt_3 = _fit_2D_gaussian(shots_3[:,0], shots_3[:,1]) + # Plot stuff + axs[0].plot(shots_0[:,0], shots_0[:,1], '.', color='C0', alpha=0.025) + axs[0].plot(shots_1[:,0], shots_1[:,1], '.', color='C3', alpha=0.025) + axs[0].plot(shots_2[:,0], shots_2[:,1], '.', color='C2', alpha=0.025) + axs[0].plot(shots_3[:,0], shots_3[:,1], '.', color='gold', alpha=0.025) + axs[0].plot([0, popt_0[1]], [0, popt_0[2]], '--', color='k', lw=.5) + axs[0].plot([0, popt_1[1]], [0, popt_1[2]], '--', color='k', lw=.5) + axs[0].plot([0, popt_2[1]], [0, popt_2[2]], '--', color='k', lw=.5) + axs[0].plot([0, popt_3[1]], [0, popt_3[2]], '--', color='k', lw=.5) + axs[0].plot(popt_0[1], popt_0[2], '.', color='C0', label='ground') + axs[0].plot(popt_1[1], popt_1[2], '.', color='C3', label='excited') + axs[0].plot(popt_2[1], popt_2[2], '.', color='C2', label='$2^\mathrm{nd}$ excited') + axs[0].plot(popt_3[1], popt_3[2], '.', color='gold', label='$3^\mathrm{nd}$ excited') + axs[0].plot(popt_0[1], popt_0[2], 'x', color='white') + axs[0].plot(popt_1[1], popt_1[2], 'x', color='white') + axs[0].plot(popt_2[1], popt_2[2], 'x', color='white') + axs[0].plot(popt_3[1], popt_3[2], 'x', color='white') + # Draw 4sigma ellipse around mean + from matplotlib.patches import Ellipse + circle_0 = Ellipse((popt_0[1], popt_0[2]), + width=4*popt_0[3], height=4*popt_0[4], + angle=-popt_0[5]*180/np.pi, + ec='white', fc='none', ls='--', lw=1.25, zorder=10) + axs[0].add_patch(circle_0) + circle_1 = Ellipse((popt_1[1], popt_1[2]), + width=4*popt_1[3], height=4*popt_1[4], + angle=-popt_1[5]*180/np.pi, + ec='white', fc='none', ls='--', lw=1.25, zorder=10) + axs[0].add_patch(circle_1) + circle_2 = 
Ellipse((popt_2[1], popt_2[2]), + width=4*popt_2[3], height=4*popt_2[4], + angle=-popt_2[5]*180/np.pi, + ec='white', fc='none', ls='--', lw=1.25, zorder=10) + axs[0].add_patch(circle_2) + circle_3 = Ellipse((popt_3[1], popt_3[2]), + width=4*popt_3[3], height=4*popt_3[4], + angle=-popt_3[5]*180/np.pi, + ec='white', fc='none', ls='--', lw=1.25, zorder=10) + axs[0].add_patch(circle_3) + # Plot classifier zones + from matplotlib.patches import Polygon + _all_shots = np.concatenate((shots_0, shots_1)) + _lim = np.max([ np.max(np.abs(_all_shots[:,0]))*1.1, np.max(np.abs(_all_shots[:,1]))*1.1 ]) + Lim_points = {} + for bound in ['01', '12', '03', '23']: + dec_bounds['mean'] + _x0, _y0 = dec_bounds['mean'] + _x1, _y1 = dec_bounds[bound] + a = (_y1-_y0)/(_x1-_x0) + b = _y0 - a*_x0 + _xlim = 1e2*np.sign(_x1-_x0) + _ylim = a*_xlim + b + Lim_points[bound] = _xlim, _ylim + # Plot 0 area + _points = [dec_bounds['mean'], Lim_points['01'], Lim_points['03']] + _patch = Polygon(_points, color='C0', alpha=0.2, lw=0) + axs[0].add_patch(_patch) + # Plot 1 area + _points = [dec_bounds['mean'], Lim_points['01'], Lim_points['12']] + _patch = Polygon(_points, color='C3', alpha=0.2, lw=0) + axs[0].add_patch(_patch) + # Plot 2 area + _points = [dec_bounds['mean'], Lim_points['12'], Lim_points['23']] + _patch = Polygon(_points, color='C2', alpha=0.2, lw=0) + axs[0].add_patch(_patch) + # Plot 2 area + _points = [dec_bounds['mean'], Lim_points['03'], Lim_points['23']] + _patch = Polygon(_points, color='gold', alpha=0.2, lw=0) + axs[0].add_patch(_patch) + # Plot decision boundary + for bound in ['01', '12', '03', '23']: + _x0, _y0 = dec_bounds['mean'] + _x1, _y1 = Lim_points[bound] + axs[0].plot([_x0, _x1], [_y0, _y1], 'k--', lw=1) + axs[0].set_xlim(-_lim, _lim) + axs[0].set_ylim(-_lim, _lim) + axs[0].legend(frameon=False, loc=1) + axs[0].set_xlabel('Integrated voltage I') + axs[0].set_ylabel('Integrated voltage Q') + axs[0].set_title(f'IQ plot qubit {qubit}') + fig.suptitle(f'{timestamp}\n') + 
########################## + # Plot projections + ########################## + # 01 projection + _bin_c = projection_01['bin_centers'] + bin_width = _bin_c[1]-_bin_c[0] + axs[1].bar(_bin_c, projection_01['h0'], bin_width, fc='C0', alpha=0.4) + axs[1].bar(_bin_c, projection_01['h1'], bin_width, fc='C3', alpha=0.4) + axs[1].plot(_bin_c, double_gauss(_bin_c, *projection_01['popt0']), '-C0') + axs[1].plot(_bin_c, double_gauss(_bin_c, *projection_01['popt1']), '-C3') + axs[1].axvline(projection_01['threshold'], ls='--', color='k', lw=1) + text = '\n'.join((f'Fid. : {projection_01["Fid"]*100:.1f}%', + f'SNR : {projection_01["SNR"]:.1f}')) + props = dict(boxstyle='round', facecolor='gray', alpha=0) + axs[1].text(.775, .9, text, transform=axs[1].transAxes, + verticalalignment='top', bbox=props, fontsize=7) + axs[1].text(projection_01['popt0'][0], projection_01['popt0'][4]/2, + r'$|g\rangle$', ha='center', va='center', color='C0') + axs[1].text(projection_01['popt1'][0], projection_01['popt1'][4]/2, + r'$|e\rangle$', ha='center', va='center', color='C3') + axs[1].set_xticklabels([]) + axs[1].set_xlim(_bin_c[0], _bin_c[-1]) + axs[1].set_ylim(bottom=0) + axs[1].set_title('Projection of data') + # 12 projection + _bin_c = projection_12['bin_centers'] + bin_width = _bin_c[1]-_bin_c[0] + axs[2].bar(_bin_c, projection_12['h1'], bin_width, fc='C3', alpha=0.4) + axs[2].bar(_bin_c, projection_12['h2'], bin_width, fc='C2', alpha=0.4) + axs[2].plot(_bin_c, double_gauss(_bin_c, *projection_12['popt1']), '-C3') + axs[2].plot(_bin_c, double_gauss(_bin_c, *projection_12['popt2']), '-C2') + axs[2].axvline(projection_12['threshold'], ls='--', color='k', lw=1) + text = '\n'.join((f'Fid. 
: {projection_12["Fid"]*100:.1f}%', + f'SNR : {projection_12["SNR"]:.1f}')) + props = dict(boxstyle='round', facecolor='gray', alpha=0) + axs[2].text(.775, .9, text, transform=axs[2].transAxes, + verticalalignment='top', bbox=props, fontsize=7) + axs[2].text(projection_12['popt1'][0], projection_12['popt1'][4]/2, + r'$|e\rangle$', ha='center', va='center', color='C3') + axs[2].text(projection_12['popt2'][0], projection_12['popt2'][4]/2, + r'$|f\rangle$', ha='center', va='center', color='C2') + axs[2].set_xticklabels([]) + axs[2].set_xlim(_bin_c[0], _bin_c[-1]) + axs[2].set_ylim(bottom=0) + # 03 projection + _bin_c = projection_03['bin_centers'] + bin_width = _bin_c[1]-_bin_c[0] + axs[3].bar(_bin_c, projection_03['h0'], bin_width, fc='C0', alpha=0.4) + axs[3].bar(_bin_c, projection_03['h3'], bin_width, fc='gold', alpha=0.4) + axs[3].plot(_bin_c, double_gauss(_bin_c, *projection_03['popt0']), '-C0') + axs[3].plot(_bin_c, double_gauss(_bin_c, *projection_03['popt3']), '-C1') + axs[3].axvline(projection_03['threshold'], ls='--', color='k', lw=1) + text = '\n'.join((f'Fid. 
: {projection_03["Fid"]*100:.1f}%', + f'SNR : {projection_03["SNR"]:.1f}')) + props = dict(boxstyle='round', facecolor='gray', alpha=0) + axs[3].text(.775, .9, text, transform=axs[3].transAxes, + verticalalignment='top', bbox=props, fontsize=7) + axs[3].text(projection_03['popt0'][0], projection_03['popt0'][4]/2, + r'$|g\rangle$', ha='center', va='center', color='C0') + axs[3].text(projection_03['popt3'][0], projection_03['popt3'][4]/2, + r'$|f\rangle$', ha='center', va='center', color='gold') + axs[3].set_xticklabels([]) + axs[3].set_xlim(_bin_c[0], _bin_c[-1]) + axs[3].set_ylim(bottom=0) + axs[3].set_xlabel('Integrated voltage') + # 23 projection + _bin_c = projection_23['bin_centers'] + bin_width = _bin_c[1]-_bin_c[0] + axs[4].bar(_bin_c, projection_23['h2'], bin_width, fc='C2', alpha=0.4) + axs[4].bar(_bin_c, projection_23['h3'], bin_width, fc='gold', alpha=0.4) + axs[4].plot(_bin_c, double_gauss(_bin_c, *projection_23['popt2']), '-C2') + axs[4].plot(_bin_c, double_gauss(_bin_c, *projection_23['popt3']), '-C1') + axs[4].axvline(projection_23['threshold'], ls='--', color='k', lw=1) + text = '\n'.join((f'Fid. 
: {projection_23["Fid"]*100:.1f}%', + f'SNR : {projection_23["SNR"]:.1f}')) + props = dict(boxstyle='round', facecolor='gray', alpha=0) + axs[4].text(.775, .9, text, transform=axs[4].transAxes, + verticalalignment='top', bbox=props, fontsize=7) + axs[4].text(projection_23['popt2'][0], projection_23['popt2'][4]/2, + r'$|g\rangle$', ha='center', va='center', color='C0') + axs[4].text(projection_23['popt3'][0], projection_23['popt3'][4]/2, + r'$|f\rangle$', ha='center', va='center', color='gold') + axs[4].set_xticklabels([]) + axs[4].set_xlim(_bin_c[0], _bin_c[-1]) + axs[4].set_ylim(bottom=0) + axs[4].set_xlabel('Integrated voltage') + # Write fidelity textbox + text = '\n'.join(('Assignment fidelity:', + f'$F_g$ : {Fid_dict["0"]*100:.1f}%', + f'$F_e$ : {Fid_dict["1"]*100:.1f}%', + f'$F_f$ : {Fid_dict["2"]*100:.1f}%', + f'$F_h$ : {Fid_dict["3"]*100:.1f}%' if '3' in Fid_dict.keys() else '', + f'$F_\mathrm{"{avg}"}$ : {Fid_dict["avg"]*100:.1f}%')) + props = dict(boxstyle='round', facecolor='gray', alpha=.2) + axs[1].text(1.12, 1, text, transform=axs[1].transAxes, + verticalalignment='top', bbox=props) + +def ssro_IQ_projection_plotfn_2( + shots_0, + shots_1, + shots_2, + projection_01, + projection_12, + projection_02, + classifier, + dec_bounds, + Fid_dict, + timestamp, + qubit, + ax, **kw): + fig = ax.get_figure() + axs = fig.get_axes() + # Fit 2D gaussians + from scipy.optimize import curve_fit + def twoD_Gaussian(data, amplitude, x0, y0, sigma_x, sigma_y, theta): + x, y = data + x0 = float(x0) + y0 = float(y0) + a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2) + b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2) + c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2) + g = amplitude*np.exp( - (a*((x-x0)**2) + 2*b*(x-x0)*(y-y0) + + c*((y-y0)**2))) + return g.ravel() + def _fit_2D_gaussian(X, Y): + counts, _x, _y = np.histogram2d(X, Y, bins=[100, 100], density=True) + x = (_x[:-1] + _x[1:]) / 2 + y = 
(_y[:-1] + _y[1:]) / 2 + _x, _y = np.meshgrid(_x, _y) + x, y = np.meshgrid(x, y) + p0 = [counts.max(), np.mean(X), np.mean(Y), np.std(X), np.std(Y), 0] + popt, pcov = curve_fit(twoD_Gaussian, (x, y), counts.T.ravel(), p0=p0) + return popt + popt_0 = _fit_2D_gaussian(shots_0[:,0], shots_0[:,1]) + popt_1 = _fit_2D_gaussian(shots_1[:,0], shots_1[:,1]) + popt_2 = _fit_2D_gaussian(shots_2[:,0], shots_2[:,1]) + # Plot stuff + axs[0].plot(shots_0[:,0], shots_0[:,1], '.', color='C0', alpha=0.05) + axs[0].plot(shots_1[:,0], shots_1[:,1], '.', color='C3', alpha=0.05) + axs[0].plot(shots_2[:,0], shots_2[:,1], '.', color='C2', alpha=0.05) + axs[0].plot([0, popt_0[1]], [0, popt_0[2]], '--', color='k', lw=.5) + axs[0].plot([0, popt_1[1]], [0, popt_1[2]], '--', color='k', lw=.5) + axs[0].plot([0, popt_2[1]], [0, popt_2[2]], '--', color='k', lw=.5) + axs[0].plot(popt_0[1], popt_0[2], '.', color='C0', label='ground') + axs[0].plot(popt_1[1], popt_1[2], '.', color='C3', label='excited') + axs[0].plot(popt_2[1], popt_2[2], '.', color='C2', label='$2^\mathrm{nd}$ excited') + axs[0].plot(popt_0[1], popt_0[2], 'x', color='white') + axs[0].plot(popt_1[1], popt_1[2], 'x', color='white') + axs[0].plot(popt_2[1], popt_2[2], 'x', color='white') + # Draw 4sigma ellipse around mean + from matplotlib.patches import Ellipse + circle_0 = Ellipse((popt_0[1], popt_0[2]), + width=4*popt_0[3], height=4*popt_0[4], + angle=-popt_0[5]*180/np.pi, + ec='white', fc='none', ls='--', lw=1.25, zorder=10) + axs[0].add_patch(circle_0) + circle_1 = Ellipse((popt_1[1], popt_1[2]), + width=4*popt_1[3], height=4*popt_1[4], + angle=-popt_1[5]*180/np.pi, + ec='white', fc='none', ls='--', lw=1.25, zorder=10) + axs[0].add_patch(circle_1) + circle_2 = Ellipse((popt_2[1], popt_2[2]), + width=4*popt_2[3], height=4*popt_2[4], + angle=-popt_2[5]*180/np.pi, + ec='white', fc='none', ls='--', lw=1.25, zorder=10) + axs[0].add_patch(circle_2) + # Plot classifier zones + from matplotlib.patches import Polygon + _all_shots = 
np.concatenate((shots_0, shots_1, shots_2)) + _lim = np.max([ np.max(np.abs(_all_shots[:,0]))*1.1, np.max(np.abs(_all_shots[:,1]))*1.1 ]) + Lim_points = {} + for bound in ['01', '12', '02']: + dec_bounds['mean'] + _x0, _y0 = dec_bounds['mean'] + _x1, _y1 = dec_bounds[bound] + a = (_y1-_y0)/(_x1-_x0) + b = _y0 - a*_x0 + _xlim = 1e2*np.sign(_x1-_x0) + _ylim = a*_xlim + b + Lim_points[bound] = _xlim, _ylim + # Plot 0 area + _points = [dec_bounds['mean'], Lim_points['01'], Lim_points['02']] + _patch = Polygon(_points, color='C0', alpha=0.2, lw=0) + axs[0].add_patch(_patch) + # Plot 1 area + _points = [dec_bounds['mean'], Lim_points['01'], Lim_points['12']] + _patch = Polygon(_points, color='C3', alpha=0.2, lw=0) + axs[0].add_patch(_patch) + # Plot 2 area + _points = [dec_bounds['mean'], Lim_points['02'], Lim_points['12']] + _patch = Polygon(_points, color='C2', alpha=0.2, lw=0) + axs[0].add_patch(_patch) + # Plot decision boundary + for bound in ['01', '12', '02']: + _x0, _y0 = dec_bounds['mean'] + _x1, _y1 = Lim_points[bound] + axs[0].plot([_x0, _x1], [_y0, _y1], 'k--', lw=1) + axs[0].set_xlim(-_lim, _lim) + axs[0].set_ylim(-_lim, _lim) + axs[0].legend(frameon=False) + axs[0].set_xlabel('Integrated voltage I') + axs[0].set_ylabel('Integrated voltage Q') + axs[0].set_title(f'IQ plot qubit {qubit}') + fig.suptitle(f'{timestamp}\n') + ########################## + # Plot projections + ########################## + # 01 projection + _bin_c = projection_01['bin_centers'] + bin_width = _bin_c[1]-_bin_c[0] + axs[1].bar(_bin_c, projection_01['h0'], bin_width, fc='C0', alpha=0.4) + axs[1].bar(_bin_c, projection_01['h1'], bin_width, fc='C3', alpha=0.4) + axs[1].plot(_bin_c, double_gauss(_bin_c, *projection_01['popt0']), '-C0') + axs[1].plot(_bin_c, double_gauss(_bin_c, *projection_01['popt1']), '-C3') + axs[1].axvline(projection_01['threshold'], ls='--', color='k', lw=1) + text = '\n'.join((f'Fid. 
: {projection_01["Fid"]*100:.1f}%', + f'SNR : {projection_01["SNR"]:.1f}')) + props = dict(boxstyle='round', facecolor='gray', alpha=0) + axs[1].text(.775, .9, text, transform=axs[1].transAxes, + verticalalignment='top', bbox=props, fontsize=7) + axs[1].text(projection_01['popt0'][0], projection_01['popt0'][4]/2, + r'$|g\rangle$', ha='center', va='center', color='C0') + axs[1].text(projection_01['popt1'][0], projection_01['popt1'][4]/2, + r'$|e\rangle$', ha='center', va='center', color='C3') + axs[1].set_xticklabels([]) + axs[1].set_xlim(_bin_c[0], _bin_c[-1]) + axs[1].set_ylim(bottom=0) + axs[1].set_title('Projection of data') + # 12 projection + _bin_c = projection_12['bin_centers'] + bin_width = _bin_c[1]-_bin_c[0] + axs[2].bar(_bin_c, projection_12['h1'], bin_width, fc='C3', alpha=0.4) + axs[2].bar(_bin_c, projection_12['h2'], bin_width, fc='C2', alpha=0.4) + axs[2].plot(_bin_c, double_gauss(_bin_c, *projection_12['popt1']), '-C3') + axs[2].plot(_bin_c, double_gauss(_bin_c, *projection_12['popt2']), '-C2') + axs[2].axvline(projection_12['threshold'], ls='--', color='k', lw=1) + text = '\n'.join((f'Fid. 
: {projection_12["Fid"]*100:.1f}%', + f'SNR : {projection_12["SNR"]:.1f}')) + props = dict(boxstyle='round', facecolor='gray', alpha=0) + axs[2].text(.775, .9, text, transform=axs[2].transAxes, + verticalalignment='top', bbox=props, fontsize=7) + axs[2].text(projection_12['popt1'][0], projection_12['popt1'][4]/2, + r'$|e\rangle$', ha='center', va='center', color='C3') + axs[2].text(projection_12['popt2'][0], projection_12['popt2'][4]/2, + r'$|f\rangle$', ha='center', va='center', color='C2') + axs[2].set_xticklabels([]) + axs[2].set_xlim(_bin_c[0], _bin_c[-1]) + axs[2].set_ylim(bottom=0) + # 02 projection + _bin_c = projection_02['bin_centers'] + bin_width = _bin_c[1]-_bin_c[0] + axs[3].bar(_bin_c, projection_02['h0'], bin_width, fc='C0', alpha=0.4) + axs[3].bar(_bin_c, projection_02['h2'], bin_width, fc='C2', alpha=0.4) + axs[3].plot(_bin_c, double_gauss(_bin_c, *projection_02['popt0']), '-C0') + axs[3].plot(_bin_c, double_gauss(_bin_c, *projection_02['popt2']), '-C2') + axs[3].axvline(projection_02['threshold'], ls='--', color='k', lw=1) + text = '\n'.join((f'Fid. 
: {projection_02["Fid"]*100:.1f}%', + f'SNR : {projection_02["SNR"]:.1f}')) + props = dict(boxstyle='round', facecolor='gray', alpha=0) + axs[3].text(.775, .9, text, transform=axs[3].transAxes, + verticalalignment='top', bbox=props, fontsize=7) + axs[3].text(projection_02['popt0'][0], projection_02['popt0'][4]/2, + r'$|g\rangle$', ha='center', va='center', color='C0') + axs[3].text(projection_02['popt2'][0], projection_02['popt2'][4]/2, + r'$|f\rangle$', ha='center', va='center', color='C2') + axs[3].set_xticklabels([]) + axs[3].set_xlim(_bin_c[0], _bin_c[-1]) + axs[3].set_ylim(bottom=0) + axs[3].set_xlabel('Integrated voltage') + # Write fidelity textbox + text = '\n'.join(('Assignment fidelity:', + f'$F_g$ : {Fid_dict["0"]*100:.1f}%', + f'$F_e$ : {Fid_dict["1"]*100:.1f}%', + f'$F_f$ : {Fid_dict["2"]*100:.1f}%', + f'$F_\mathrm{"{avg}"}$ : {Fid_dict["avg"]*100:.1f}%')) + props = dict(boxstyle='round', facecolor='gray', alpha=.2) + axs[1].text(1.05, 1, text, transform=axs[1].transAxes, + verticalalignment='top', bbox=props) + +def defect_rate_plotn( + n_rounds, + defect_rate_0, + defect_rate_0_ps, + defect_rate_1, + defect_rate_1_ps, + defect_rate_2, + defect_rate_2_ps, + defect_rate_3, + defect_rate_3_ps, + ps_0, + ps_1, + ps_2, + ps_3, + p_0, + p_1, + p_2, + p_3, + timestamp, + qubit, + ax, **kw): + fig = ax.get_figure() + axs = fig.get_axes() + + axs[0].plot((np.arange(n_rounds)+1)[1:], defect_rate_0[1:], 'C0-', label='Normal') + # axs[0].plot((np.arange(n_rounds)+1)[1:], defect_rate_0_ps[1:], 'C0-', alpha=.5) + axs[0].plot((np.arange(n_rounds)+1)[1:], defect_rate_1[1:], 'C1-', label='LRU data') + # axs[0].plot((np.arange(n_rounds)+1)[1:], defect_rate_1_ps[1:], 'C1-', alpha=.5) + axs[0].plot((np.arange(n_rounds)+1)[1:], defect_rate_2[1:], 'C2-', label='LRU ancilla') + # axs[0].plot((np.arange(n_rounds)+1)[1:], defect_rate_2_ps[1:], 'C2-', alpha=.5) + axs[0].plot((np.arange(n_rounds)+1)[1:], defect_rate_3[1:], 'C3-', label='LRU data ancilla') + # 
axs[0].plot((np.arange(n_rounds)+1)[1:], defect_rate_3_ps[1:], 'C3-', alpha=.5) + axs[0].grid(ls='--') + axs[0].set_ylabel('error probability') + axs[0].set_xlabel('rounds') + axs[0].legend(frameon=False, bbox_to_anchor = (1.01, 1)) + axs[0].set_title('defect rate') + + axs[1].plot((np.arange(n_rounds)+1), p_0*100, 'C0-', label='Normal') + axs[1].plot((np.arange(n_rounds)+1), p_1*100, 'C1-', label='LRU data') + axs[1].plot((np.arange(n_rounds)+1), p_2*100, 'C2-', label='LRU ancilla') + axs[1].plot((np.arange(n_rounds)+1), p_3*100, 'C3-', label='LRU data ancilla') + axs[1].set_ylabel(r'$|f\rangle$ population (%)') + axs[1].set_xlabel('rounds') + axs[1].set_title('Leakage population') + axs[1].grid(ls='--') + + fig.suptitle(f'{timestamp}\n{qubit} repeated stabilizer experiment', y=1.01) fig.tight_layout() - ax[4].text(1.08, 1.25, 'Single parity', transform=ax[4].transAxes, fontsize=12, - verticalalignment='top') - ax[4].text(1.1, 0.95, textstr1, transform=ax[4].transAxes, fontsize=10, - verticalalignment='top', bbox=props) - ax[4].text(2.25, 1.25, 'Repeated parity', transform=ax[4].transAxes, fontsize=12, - verticalalignment='top') - ax[4].text(2.27, 0.95, textstr2, transform=ax[4].transAxes, fontsize=10, - verticalalignment='top', bbox=props) - fig.suptitle(f'Sandia parity benchmark {timestamp}', y=1.01, x=.43) +def _calculate_defects(Shots, n_rounds, Data_qubit_meas): + ''' + Shots must be a dictionary with format: + |<---nr_shots--->| + Shots['round '] = np.array([0/1,......., 0/1]) + Returns defect values in +1/-1 (where -1 corresponds to defect). 
+ ''' + Deffect_rate = {} + nr_shots = len(Shots['round 1']) + # M array is measured data + # P array is parity data + # D array is defect data + M_values = np.ones((nr_shots, n_rounds)) + for r in range(n_rounds): + # Convert to +1 and -1 values + M_values[:,r] *= 1-2*(Shots[f'round {r+1}']) + # Append +1 Pauli frame in first round + P_values = np.hstack( (np.ones((nr_shots, 2)), M_values) ) + P_values = P_values[:,1:] * P_values[:,:-1] + # Compute parity from data-qubit readout + _final_parity = np.ones((nr_shots, 1)) + for _Data_shots in Data_qubit_meas: + # convert to +1 and -1 values and reshape + _Data_shots = 1-2*_Data_shots.reshape(nr_shots, 1) + _final_parity *= _Data_shots + # Append to ancilla measured parities + P_values = np.hstack((P_values, _final_parity)) + # Second derivative of parity to get defects + D_values = P_values[:,1:] * P_values[:,:-1] + return D_values + +class Repeated_stabilizer_measurements(ba.BaseDataAnalysis): + def __init__(self, + ancilla_qubit, + data_qubits: list, + Rounds: list, + heralded_init: bool = False, + t_start: str = None, + t_stop: str = None, + label: str = '', + options_dict: dict = None, + extract_only: bool = False, + with_reset: bool = False, + number_of_kernels: int = 3, + experiments: list = None, + Pij_matrix: bool = False, + remaining_ancillas: list=[], + auto=True + ): + super().__init__(t_start=t_start, t_stop=t_stop, + label=label, + options_dict=options_dict, + extract_only=extract_only) + # can be given as single ancilla + # or as list of ancillas if one wants to analyze + # all ancillas. 
+ if isinstance(ancilla_qubit, str): + ancilla_qubit = [ancilla_qubit] + self.ancilla_qubit = ancilla_qubit + self.data_qubits = data_qubits + self.Rounds = Rounds + self.with_reset = with_reset + self.heralded_init = heralded_init + self.number_of_kernels = number_of_kernels + self.Pij_matrix = Pij_matrix + if experiments: + assert len(experiments) == number_of_kernels + else: + experiments = [ f'kernel {i}' for i in range(number_of_kernels) ] + self.experiments = experiments + if auto: + self.run_analysis() + def extract_data(self): + self.get_timestamps() + self.timestamp = self.timestamps[0] + data_fp = get_datafilepath_from_timestamp(self.timestamp) + param_spec = {'data': ('Experimental Data/Data', 'dset'), + 'value_names': ('Experimental Data', 'attr:value_names')} + self.raw_data_dict = h5d.extract_pars_from_datafile( + data_fp, param_spec) + # Parts added to be compatible with base analysis data requirements + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] + + def process_data(self): + self.qoi = {} + ###################################### + # Sort shots and assign them + ###################################### + n_kernels = self.number_of_kernels + Rounds = self.Rounds + _total_rounds = np.sum(Rounds) + # Add heralding measurement for experimental shots + if self.heralded_init: + _total_rounds += len(Rounds) + _cycle = _total_rounds*n_kernels + 3 + # Add heralding measurement for calibration points + if self.heralded_init: + _cycle += 3 + # Get qubit names in channel order + # Note: .decode() is necessary for old compression version of HDF5 files + ch_names = [(name.decode() if isinstance(name, bytes) else name) for name in self.raw_data_dict['value_names'] ] + self.Qubits = self.ancilla_qubit + self.data_qubits + def _find_channel(ch_name): + for i, name in enumerate(ch_names): + if ch_name in name: + return i+1 + chan_idxs = { q: (_find_channel(f'{q} I'), + _find_channel(f'{q} Q')) for q in 
self.Qubits} + + # Dictionary that will store raw shots + # so that they can later be sorted. + raw_shots = {q: {} for q in self.Qubits} + Thresholds = {} + + for q_idx, qubit in enumerate(self.Qubits): + self.proc_data_dict[qubit] = {} + _ch_I, _ch_Q = chan_idxs[qubit] + _raw_shots = self.raw_data_dict['data'][:,[_ch_I, _ch_Q]] + if self.heralded_init: + _shots_0 = _raw_shots[n_kernels*_total_rounds+1::_cycle] + _shots_1 = _raw_shots[n_kernels*_total_rounds+3::_cycle] + _shots_2 = _raw_shots[n_kernels*_total_rounds+5::_cycle] + else: + _shots_0 = _raw_shots[n_kernels*_total_rounds+0::_cycle] + _shots_1 = _raw_shots[n_kernels*_total_rounds+1::_cycle] + _shots_2 = _raw_shots[n_kernels*_total_rounds+2::_cycle] + # Rotate data + center_0 = np.array([np.mean(_shots_0[:,0]), np.mean(_shots_0[:,1])]) + center_1 = np.array([np.mean(_shots_1[:,0]), np.mean(_shots_1[:,1])]) + center_2 = np.array([np.mean(_shots_2[:,0]), np.mean(_shots_2[:,1])]) + raw_shots[qubit] = _rotate_and_center_data(_raw_shots[:,0], _raw_shots[:,1], center_0, center_1) + if self.heralded_init: + Shots_0 = raw_shots[qubit][n_kernels*_total_rounds+1::_cycle] + Shots_1 = raw_shots[qubit][n_kernels*_total_rounds+3::_cycle] + Shots_2 = raw_shots[qubit][n_kernels*_total_rounds+5::_cycle] + else: + Shots_0 = raw_shots[qubit][n_kernels*_total_rounds+0::_cycle] + Shots_1 = raw_shots[qubit][n_kernels*_total_rounds+1::_cycle] + Shots_2 = raw_shots[qubit][n_kernels*_total_rounds+2::_cycle] + + # _s_0, _s_1 = np.mean(Shots_0[:,0]), np.mean(Shots_1[:,0]) + # if _s_1 < _s_0: + # Shots_0, Shots_1 = -np.array(Shots_0), -np.array(Shots_1) + Thresholds[qubit] = estimate_threshold(Shots_0[:,0], Shots_1[:,0]) + self.proc_data_dict[qubit]['Shots_0'] = Shots_0 + self.proc_data_dict[qubit]['Shots_1'] = Shots_1 + self.proc_data_dict[qubit]['Shots_2'] = Shots_2 + self.proc_data_dict['Thresholds'] = Thresholds + # Use classifier for data + data = np.concatenate((Shots_0, Shots_1, Shots_2)) + labels = [0 for s in Shots_0]+[1 for 
s in Shots_1]+[2 for s in Shots_2] + from sklearn.discriminant_analysis import LinearDiscriminantAnalysis + clf = LinearDiscriminantAnalysis() + clf.fit(data, labels) + dec_bounds = _decision_boundary_points(clf.coef_, clf.intercept_) + Fid_dict = {} + for state, shots in zip([ '0', '1', '2'], + [Shots_0, Shots_1, Shots_2]): + _res = clf.predict(shots) + _fid = np.mean(_res == int(state)) + Fid_dict[state] = _fid + Fid_dict['avg'] = np.mean([f for f in Fid_dict.values()]) + # Get assignment fidelity matrix + M = np.zeros((3,3)) + for i, shots in enumerate([Shots_0, Shots_1, Shots_2]): + for j, state in enumerate(['0', '1', '2']): + _res = clf.predict(shots) + M[i][j] = np.mean(_res == int(state)) + # # Make it a 4x4 matrix + # M = np.append(M, [[0,0,0]], 0) + # M = np.append(M, [[0],[0],[0],[1]], 1) + self.proc_data_dict[qubit]['classifier'] = clf + self.proc_data_dict[qubit]['Fid_dict'] = Fid_dict + self.proc_data_dict[qubit]['Assignment_matrix'] = M + self.proc_data_dict[qubit]['dec_bounds'] = dec_bounds + + ################## + # Post selection + ################## + if self.heralded_init: + _ps_shots_0 = raw_shots[qubit][n_kernels*_total_rounds+0::_cycle] + _ps_shots_1 = raw_shots[qubit][n_kernels*_total_rounds+2::_cycle] + _ps_shots_2 = raw_shots[qubit][n_kernels*_total_rounds+4::_cycle] + + def _post_select(shots, ps_shots): + _ps_shots = clf.predict(ps_shots) + _mask = np.array([1 if s == 0 else np.nan for s in _ps_shots]) + # print(np.nansum(_mask)/ len(_mask)) + shots = shots[~np.isnan(_mask)] + return shots + + Shots_0 = _post_select(Shots_0, _ps_shots_0) + Shots_1 = _post_select(Shots_1, _ps_shots_1) + Shots_2 = _post_select(Shots_2, _ps_shots_2) + self.proc_data_dict[qubit]['Shots_0'] = Shots_0 + self.proc_data_dict[qubit]['Shots_1'] = Shots_1 + self.proc_data_dict[qubit]['Shots_2'] = Shots_2 + # Use classifier for data + data = np.concatenate((Shots_0, Shots_1, Shots_2)) + labels = [0 for s in Shots_0]+[1 for s in Shots_1]+[2 for s in Shots_2] + from 
sklearn.discriminant_analysis import LinearDiscriminantAnalysis + clf = LinearDiscriminantAnalysis() + clf.fit(data, labels) + dec_bounds = _decision_boundary_points(clf.coef_, clf.intercept_) + Fid_dict = {} + for state, shots in zip([ '0', '1', '2'], + [Shots_0, Shots_1, Shots_2]): + _res = clf.predict(shots) + _fid = np.mean(_res == int(state)) + Fid_dict[state] = _fid + Fid_dict['avg'] = np.mean([f for f in Fid_dict.values()]) + # Get assignment fidelity matrix + M = np.zeros((3,3)) + for i, shots in enumerate([Shots_0, Shots_1, Shots_2]): + for j, state in enumerate(['0', '1', '2']): + _res = clf.predict(shots) + M[i][j] = np.mean(_res == int(state)) + # # Make it a 4x4 matrix + # M = np.append(M, [[0,0,0]], 0) + # M = np.append(M, [[0],[0],[0],[1]], 1) + self.proc_data_dict[qubit]['classifier'] = clf + self.proc_data_dict[qubit]['Fid_dict'] = Fid_dict + self.proc_data_dict[qubit]['Assignment_matrix'] = M + self.proc_data_dict[qubit]['dec_bounds'] = dec_bounds + ######################################### + # Project data along axis perpendicular + # to the decision boundaries. + ######################################### + ############################ + # Projection along 01 axis. 
+ ############################ + # Rotate shots over 01 axis + shots_0 = _rotate_and_center_data(Shots_0[:,0],Shots_0[:,1],dec_bounds['mean'],dec_bounds['01'],phi=np.pi/2) + shots_1 = _rotate_and_center_data(Shots_1[:,0],Shots_1[:,1],dec_bounds['mean'],dec_bounds['01'],phi=np.pi/2) + # Take relavant quadrature + shots_0 = shots_0[:,0] + shots_1 = shots_1[:,0] + n_shots_1 = len(shots_1) + # find range + _all_shots = np.concatenate((shots_0, shots_1)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x0, n0 = np.unique(shots_0, return_counts=True) + x1, n1 = np.unique(shots_1, return_counts=True) + Fid_01, threshold_01 = _calculate_fid_and_threshold(x0, n0, x1, n1) + # Histogram of shots for 1 and 2 + h0, bin_edges = np.histogram(shots_0, bins=100, range=_range) + h1, bin_edges = np.histogram(shots_1, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt0, popt1, params_01 = _fit_double_gauss(bin_centers, h0, h1) + # Save processed data + self.proc_data_dict[qubit]['projection_01'] = {} + self.proc_data_dict[qubit]['projection_01']['h0'] = h0 + self.proc_data_dict[qubit]['projection_01']['h1'] = h1 + self.proc_data_dict[qubit]['projection_01']['bin_centers'] = bin_centers + self.proc_data_dict[qubit]['projection_01']['popt0'] = popt0 + self.proc_data_dict[qubit]['projection_01']['popt1'] = popt1 + self.proc_data_dict[qubit]['projection_01']['SNR'] = params_01['SNR'] + self.proc_data_dict[qubit]['projection_01']['Fid'] = Fid_01 + self.proc_data_dict[qubit]['projection_01']['threshold'] = threshold_01 + ############################ + # Projection along 12 axis. 
+ ############################ + # Rotate shots over 12 axis + shots_1 = _rotate_and_center_data(Shots_1[:,0],Shots_1[:,1],dec_bounds['mean'], dec_bounds['12'], phi=np.pi/2) + shots_2 = _rotate_and_center_data(Shots_2[:,0],Shots_2[:,1],dec_bounds['mean'], dec_bounds['12'], phi=np.pi/2) + # Take relavant quadrature + shots_1 = shots_1[:,0] + shots_2 = shots_2[:,0] + n_shots_2 = len(shots_2) + # find range + _all_shots = np.concatenate((shots_1, shots_2)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x1, n1 = np.unique(shots_1, return_counts=True) + x2, n2 = np.unique(shots_2, return_counts=True) + Fid_12, threshold_12 = _calculate_fid_and_threshold(x1, n1, x2, n2) + # Histogram of shots for 1 and 2 + h1, bin_edges = np.histogram(shots_1, bins=100, range=_range) + h2, bin_edges = np.histogram(shots_2, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt1, popt2, params_12 = _fit_double_gauss(bin_centers, h1, h2) + # Save processed data + self.proc_data_dict[qubit]['projection_12'] = {} + self.proc_data_dict[qubit]['projection_12']['h1'] = h1 + self.proc_data_dict[qubit]['projection_12']['h2'] = h2 + self.proc_data_dict[qubit]['projection_12']['bin_centers'] = bin_centers + self.proc_data_dict[qubit]['projection_12']['popt1'] = popt1 + self.proc_data_dict[qubit]['projection_12']['popt2'] = popt2 + self.proc_data_dict[qubit]['projection_12']['SNR'] = params_12['SNR'] + self.proc_data_dict[qubit]['projection_12']['Fid'] = Fid_12 + self.proc_data_dict[qubit]['projection_12']['threshold'] = threshold_12 + ############################ + # Projection along 02 axis. 
+ ############################ + # Rotate shots over 02 axis + shots_0 = _rotate_and_center_data(Shots_0[:,0],Shots_0[:,1],dec_bounds['mean'],dec_bounds['02'], phi=np.pi/2) + shots_2 = _rotate_and_center_data(Shots_2[:,0],Shots_2[:,1],dec_bounds['mean'],dec_bounds['02'], phi=np.pi/2) + # Take relavant quadrature + shots_0 = shots_0[:,0] + shots_2 = shots_2[:,0] + n_shots_2 = len(shots_2) + # find range + _all_shots = np.concatenate((shots_0, shots_2)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x0, n0 = np.unique(shots_0, return_counts=True) + x2, n2 = np.unique(shots_2, return_counts=True) + Fid_02, threshold_02 = _calculate_fid_and_threshold(x0, n0, x2, n2) + # Histogram of shots for 1 and 2 + h0, bin_edges = np.histogram(shots_0, bins=100, range=_range) + h2, bin_edges = np.histogram(shots_2, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt0, popt2, params_02 = _fit_double_gauss(bin_centers, h0, h2) + # Save processed data + self.proc_data_dict[qubit]['projection_02'] = {} + self.proc_data_dict[qubit]['projection_02']['h0'] = h0 + self.proc_data_dict[qubit]['projection_02']['h2'] = h2 + self.proc_data_dict[qubit]['projection_02']['bin_centers'] = bin_centers + self.proc_data_dict[qubit]['projection_02']['popt0'] = popt0 + self.proc_data_dict[qubit]['projection_02']['popt2'] = popt2 + self.proc_data_dict[qubit]['projection_02']['SNR'] = params_02['SNR'] + self.proc_data_dict[qubit]['projection_02']['Fid'] = Fid_02 + self.proc_data_dict[qubit]['projection_02']['threshold'] = threshold_02 + + ######################################## + # Analyze experiment shots, post-select + # on leakage and calculate defect rate + ######################################## + # Sort experimental shots + # different kernels are different experiment types + # (single stabilizer, surface_13, surface_17...) 
+ shots_exp = { k:{} for k in range(n_kernels) } + Shots_qubit = { k:{} for k in range(n_kernels) } + Shots_qutrit = { k:{} for k in range(n_kernels) } + for k in range(n_kernels): + shots_exp[k] = {q: {} for q in self.Qubits} + Shots_qubit[k] = {q: {} for q in self.Qubits} + Shots_qutrit[k] = {q: {} for q in self.Qubits} + + for q in self.Qubits: + # threshold = _get_threshold(Shots_0, Shots_1) + _zero_lvl = np.mean(self.proc_data_dict[q]['Shots_0'][:,0]) + _one_lvl = np.mean(self.proc_data_dict[q]['Shots_1'][:,0]) + # threshold = self.proc_data_dict[q]['projection_01']['threshold'] + threshold = self.proc_data_dict['Thresholds'][q] + _clf = self.proc_data_dict[q]['classifier'] + for r_idx, n_rounds in enumerate(Rounds): + for k in range(n_kernels): + shots_exp[k][q][f'{n_rounds}_R'] = {} + Shots_qubit[k][q][f'{n_rounds}_R'] = {} + Shots_qutrit[k][q][f'{n_rounds}_R'] = {} + # counter for number of shots in previous rounds + _aux = int(n_kernels*np.sum(Rounds[:r_idx])) + if self.heralded_init: + _aux = int(n_kernels*(np.sum(Rounds[:r_idx])+len(Rounds[:r_idx]))) + for r in range(n_rounds): + # Note we are using the rotated shots already + for k in range(n_kernels): + shots_exp[k][q][f'{n_rounds}_R'][f'round {r+1}'] = \ + raw_shots[q][r+k*(n_rounds+self.heralded_init)+self.heralded_init+_aux::_cycle] + # Perform Qubit assignment + if _zero_lvl < threshold: # zero level is left of threshold + for k in range(n_kernels): + Shots_qubit[k][q][f'{n_rounds}_R'][f'round {r+1}'] = \ + np.array([0 if sthreshold else 1 for s in shots_exp[k][q][f'{n_rounds}_R'][f'round {r+1}'][:,0]]) + # Perform Qutrit assignment + for k in range(n_kernels): + Shots_qutrit[k][q][f'{n_rounds}_R'][f'round {r+1}'] = _clf.predict(shots_exp[k][q][f'{n_rounds}_R'][f'round {r+1}']) + # Post selection + if self.heralded_init: + # Sort heralding shots + for k in range(n_kernels): + shots_exp[k][q][f'{n_rounds}_R']['ps'] = raw_shots[q][k*(n_rounds+self.heralded_init)+_aux::_cycle] + # Classify heralding 
shots + Shots_qutrit[k][q][f'{n_rounds}_R']['round 0'] = _clf.predict(shots_exp[k][q][f'{n_rounds}_R']['ps']) + # Compute post-selection mask + Shots_qutrit[k][q][f'{n_rounds}_R']['ps'] = np.array([ 1 if s == 0 else np.nan for s in Shots_qutrit[k][q][f'{n_rounds}_R']['round 0'] ]) + # Perform post-selection + if self.heralded_init: + for R in Rounds: + _n_shots = len(Shots_qutrit[0][q][f'{R}_R']['ps']) + _mask = { k : np.ones(_n_shots) for k in range(n_kernels) } + for q in self.Qubits: + for k in range(n_kernels): + _mask[k] *= Shots_qutrit[k][q][f'{R}_R']['ps'] + for k in range(n_kernels): + print(f'{R}_R Percentage of post-selected shots {k}: {np.nansum(_mask[k])/len(_mask[k])*100:.2f}%') + for q in self.Qubits: + for r in range(R): + for k in range(n_kernels): + # Remove marked shots in qubit shots + Shots_qubit[k][q][f'{R}_R'][f'round {r+1}'] = \ + Shots_qubit[k][q][f'{R}_R'][f'round {r+1}'][~np.isnan(_mask[k])] + # Remove marked shots in qutrit shots + Shots_qutrit[k][q][f'{R}_R'][f'round {r+1}'] = \ + Shots_qutrit[k][q][f'{R}_R'][f'round {r+1}'][~np.isnan(_mask[k])] + self.proc_data_dict['Shots_qubit'] = {k:Shots_qubit[k] for k in range(n_kernels)} + self.proc_data_dict['Shots_qutrit'] = {k:Shots_qutrit[k] for k in range(n_kernels)} + self.proc_data_dict['Shots_exp'] = {k:shots_exp[k] for k in range(n_kernels)} + #################### + # Calculate leakage + #################### + Population = { k: {q:{} for q in self.Qubits}\ + for k in range(n_kernels) } + Population_f = { k: {q:{} for q in self.Qubits}\ + for k in range(n_kernels) } + def _get_pop_vector(Shots): + p0 = np.mean(Shots==0) + p1 = np.mean(Shots==1) + p2 = np.mean(Shots==2) + return np.array([p0, p1, p2]) + for q in self.Qubits: + M_inv = np.linalg.inv(self.proc_data_dict[q]['Assignment_matrix']) + if q in self.ancilla_qubit: + # For the ancilla qubit we'll calculate + # leakage in every measurement round. 
+ for n_rounds in Rounds: + for k in range(n_kernels): + Population[k][q][f'{n_rounds}_R'] = {} + for r in range(n_rounds): + for k in range(n_kernels): + _pop_vec = _get_pop_vector(Shots_qutrit[k][q][f'{n_rounds}_R'][f'round {r+1}']) + Population[k][q][f'{n_rounds}_R'][f'round {r+1}'] = np.dot(_pop_vec, M_inv) + for k in range(n_kernels): + Population_f[k][q] = np.array([Population[k][q][f'{Rounds[-1]}_R'][key][2] for key in Population[k][q][f'{Rounds[-1]}_R'].keys()]) + else: + # For the data qubit we'll only calculate + # leakage in the last measurement round. + for n_rounds in Rounds: + for k in range(n_kernels): + _pop_vec = _get_pop_vector(Shots_qutrit[k][q][f'{n_rounds}_R'][f'round {n_rounds}']) + Population[k][q][f'{n_rounds}_R'] = np.dot(_pop_vec, M_inv) + for k in range(n_kernels): + Population_f[k][q] = np.array([Population[k][q][key][2] for key in Population[k][q].keys()]) + + self.proc_data_dict['Population'] = Population + self.proc_data_dict['Population_f'] = Population_f + ########################### + ## Leakage postselection + ########################### + Shots_qubit_ps = { k:{} for k in range(n_kernels) } + Ps_fraction = { k : np.ones(Rounds[-1]) for k in range(n_kernels) } + _mask = { k : {} for k in range(n_kernels) } + for k in range(n_kernels): + Shots_qubit_ps[k] = {q: {} for q in self.Qubits} + for q in self.Qubits: + for k in range(n_kernels): + Shots_qubit_ps[k][q] = {f'{R}_R': {} for R in Rounds} + for R in Rounds: + _n_shots = len(Shots_qutrit[k][q][f'{R}_R'][f'round {1}']) + _mask[k] = np.ones(_n_shots) + for r in range(R): + for qa in self.ancilla_qubit: + _mask[k] *= np.array([1 if s != 2 else np.nan for s in Shots_qutrit[k][qa][f'{R}_R'][f'round {r+1}']]) + Ps_fraction[k][r] = np.nansum(_mask[k])/_n_shots + Shots_qubit_ps[k][q][f'{R}_R'][f'round {r+1}'] = Shots_qutrit[k][q][f'{R}_R'][f'round {r+1}']*_mask[k] + ########################### + ## Postselection on first round being zero + ########################### + # Ps_fraction_1 = 
{ k : np.ones(Rounds[-1]) for k in range(n_kernels) } + # _mask_1 = { k : {} for k in range(n_kernels) } + # for k in range(n_kernels): + # _n_shots = len(Shots_qubit_ps[k][f'{Rounds[-1]}_R'][f'round {1}']) + # _mask_1[k] = np.ones(_n_shots) + # for r in range(R): + # _mask_1[k] *= np.array([1 if s != 1 else np.nan for s in Shots_qubit_ps[k][f'{Rounds[-1]}_R'][f'round 1']]) + # Ps_fraction_1[k][r] = np.nansum(_mask_1[k])/_n_shots + # Shots_qubit_ps[k][f'{Rounds[-1]}_R'][f'round {r+1}'] = Shots_qubit_ps[k][f'{Rounds[-1]}_R'][f'round {r+1}']*_mask_1[k] + + ########################### + ## Postselection on first round being zero on spec + ########################### + # Ps_fraction_1_s = { k : np.ones(Rounds[-1]) for k in range(n_kernels) } + # _mask_1_s = { k : {} for k in range(n_kernels) } + # for k in range(n_kernels): + # _mask_1_s[k] = {q: {} for q in self.remaining_ancillas} + # Ps_fraction_1_s[k] = {q: {} for q in self.remaining_ancillas} + # for q in self.remaining_ancillas: + # for k in range(n_kernels): + # _n_shots = len(Shots_qubit_spec_ps[k][q][f'{Rounds[-1]}_R'][f'round {1}']) + # _mask_1_s[k][q] = np.ones(_n_shots) + # for r in range(R): + # _mask_1_s[k][q] *= np.array([1 if s != 1 else np.nan for s in Shots_qubit_spec_ps[k][q][f'{Rounds[-1]}_R'][f'round 1']]) + # Ps_fraction_1_s[k][q][r] = np.nansum(_mask_1_s[k][q])/_n_shots + # Shots_qubit_spec_ps[k][q][f'{Rounds[-1]}_R'][f'round {r+1}'] = Shots_qubit_spec_ps[k][q][f'{Rounds[-1]}_R'][f'round {r+1}']*_mask_1_s[k][q] + + # Shots_qubit_ps = { k:{} for k in range(n_kernels) } + # for k in range(n_kernels): + # Shots_qubit_ps[k] = {f'{R}_R': {} for R in Rounds} + # for R in Rounds: + # for r in range(R): + # Shots_qubit_ps[k][f'{R}_R'][f'round {r+1}'] = \ + # np.array([ s if s!=2 else np.nan for s in Shots_qutrit[k][self.ancilla_qubit][f'{R}_R'][f'round {r+1}'] ]) + + + ######################### + # Calculate Pij matrix + ######################### + if self.Pij_matrix: + _anc_check = [ (q in 
self.ancilla_qubit) for q in ['Z1', 'Z2', 'Z3', 'Z4'] ] + assert all(_anc_check), 'All ancilla qubits need to be given to analysis for Pij matrix.' + # select experiments for which the Pij matrix will be computed + k_of_interest = [] + if ('surface_13' in self.experiments) or \ + ('surface_13_LRU' in self.experiments) or True: + if ('surface_13' in self.experiments): + k_of_interest.append(self.experiments.index('surface_13')) + if ('surface_13_LRU' in self.experiments): + k_of_interest.append(self.experiments.index('surface_13_LRU')) + else: + k_of_interest = [0] + # Calculate defects each stabilizer ancilla qubits + _Ancilla_qubits = [q for q in self.Qubits if 'Z' in q] + Defects = { q : { k:{} for k in range(n_kernels) } for q in _Ancilla_qubits } + for q in _Ancilla_qubits: + for n_rounds in Rounds: + for k in k_of_interest: + # Data qubits measured by each stabilizer + # (will be used to compute measured data-qubit parity) + stab_data_qubits = list(get_nearest_neighbors(q).keys()) + # Sort final data qubit measurement shots + Data_shots = [Shots_qubit[k][_dq][f'{n_rounds}_R'][f'round {n_rounds}'] \ + for _dq in stab_data_qubits] + # Compute defects + Defects[q][k][f'{n_rounds}_R'] = \ + _calculate_defects(Shots_qubit[k][q][f'{n_rounds}_R'], n_rounds, Data_qubit_meas=Data_shots) + self.proc_data_dict['Defects'] = Defects + self.qoi['Pij_matrix'] = { k : None for k in k_of_interest } + for k in k_of_interest: + if (10 in Rounds): + R = 10 + else: + R = np.max(Rounds) + Pij = np.zeros((R*4, R*4)) + # Order of ancilla qubits in Pij matrix + _Ancilla_qubits_ordered = ['Z3', 'Z1', 'Z4', 'Z2'] + for qi, Qi in enumerate(_Ancilla_qubits_ordered): + for i in range(R): + for qj, Qj in enumerate(_Ancilla_qubits_ordered): + for j in range(R): + if (i==j) and (qi==qj): + Pij[i+R*qi, j+R*qj] = np.nan + elif i+R*qi > j+R*qj: + Pij[i+R*qi, j+R*qj] = np.nan + elif i == 0 or j==0: + Pij[i+R*qi, j+R*qj] = np.nan + else: + defect_qi = Defects[Qi][k][f'{R}_R'][:,i] + defect_qj = 
Defects[Qj][k][f'{R}_R'][:,j] + xi = (1-defect_qi)/2 + xj = (1-defect_qj)/2 + assert all(np.unique(xi) == np.array([0,1])) + assert all(np.unique(xj) == np.array([0,1])) + Pij[i+R*qi, j+R*qj] = ( np.mean(xi*xj) - np.mean(xi)*np.mean(xj) ) / \ + ( (1-2*np.mean(xi))*(1-2*np.mean(xj)) ) + self.qoi['Pij_matrix'][k] = Pij + else: + print('Pij matrix is only calculated for surface_13 experiment') + ########################### + # Calculate defect rate + ########################### + defect_rate = { q: {} for q in self.ancilla_qubit } + defect_rate_ps = { q: {} for q in self.ancilla_qubit } + for q in self.ancilla_qubit: + defect_rate[q] = { k:{} for k in range(n_kernels) } + defect_rate_ps[q] = { k:{} for k in range(n_kernels) } + for q in self.ancilla_qubit: + for n_rounds in Rounds: + for k in range(n_kernels): + defect_rate[q][k][f'{n_rounds}_R'] = _calculate_defect_rate(Shots_qubit[k][q][f'{n_rounds}_R'], n_rounds, with_reset=self.with_reset) + for q in self.ancilla_qubit: + for k in range(n_kernels): + defect_rate_ps[q][k][f'{n_rounds}_R'] = _calculate_defect_rate(Shots_qubit_ps[k][q][f'{n_rounds}_R'], n_rounds, with_reset=self.with_reset) + + self.qoi['defect_rate'] = defect_rate + self.qoi['defect_rate_ps'] = defect_rate_ps + self.proc_data_dict['Shots_qubit_ps'] = Shots_qubit_ps + self.proc_data_dict['Ps_fraction'] = Ps_fraction + # self.proc_data_dict['quantities_of_interest'] = self.qoi + + def prepare_plots(self): + self.axs_dict = {} + + for qubit in self.Qubits: + fig = plt.figure(figsize=(10,5), dpi=100) + axs = [fig.add_subplot(121), + fig.add_subplot(322), + fig.add_subplot(324), + fig.add_subplot(326)] + # fig.patch.set_alpha(0) + self.axs_dict[f'IQ_readout_histogram_{qubit}'] = axs[0] + self.figs[f'IQ_readout_histogram_{qubit}'] = fig + self.plot_dicts[f'IQ_readout_histogram_{qubit}'] = { + 'plotfn': ssro_IQ_projection_plotfn_3, + 'ax_id': f'IQ_readout_histogram_{qubit}', + 'shots_0': self.proc_data_dict[qubit]['Shots_0'], + 'shots_1': 
self.proc_data_dict[qubit]['Shots_1'], + 'shots_2': self.proc_data_dict[qubit]['Shots_2'], + 'projection_01': self.proc_data_dict[qubit]['projection_01'], + 'projection_12': self.proc_data_dict[qubit]['projection_12'], + 'projection_02': self.proc_data_dict[qubit]['projection_02'], + 'classifier': self.proc_data_dict[qubit]['classifier'], + 'dec_bounds': self.proc_data_dict[qubit]['dec_bounds'], + 'Fid_dict': self.proc_data_dict[qubit]['Fid_dict'], + 'qubit': qubit, + 'timestamp': self.timestamp + } + + for qa in self.ancilla_qubit: + if qa in ['Z2', 'Z3', 'X1', 'X4']: + fig = plt.figure(figsize=(11,3)) + elif qa in ['Z1', 'Z4', 'X2', 'X3']: + fig = plt.figure(figsize=(12,3)) + gs = fig.add_gridspec(1, 2+len([qa]+self.data_qubits)) + axs = [] + axs.append(fig.add_subplot(gs[0, 0:2])) + for i, q in enumerate([qa]+self.data_qubits): + axs.append(fig.add_subplot(gs[0, 2+i:3+i])) + self.axs_dict[f'Deffect_rate_plot_{qa}'] = axs[0] + self.figs[f'Deffect_rate_plot_{qa}'] = fig + self.plot_dicts[f'Deffect_rate_plot_{qa}'] = { + 'plotfn': defect_rate_k_plotfn, + 'ax_id': f'Deffect_rate_plot_{qa}', + 'Rounds': self.Rounds, + 'defect_rate': self.qoi['defect_rate'][qa], + 'defect_rate_ps': self.qoi['defect_rate_ps'][qa], + 'Population': self.proc_data_dict['Population_f'], + 'experiments': self.experiments, + 'qubit': qa, + 'data_qubits': self.data_qubits, + 'timestamp': self.timestamp + } + + if self.Pij_matrix: + for k in self.qoi['Pij_matrix'].keys(): + + fig, ax = plt.subplots(figsize=(8,8), dpi=200) + self.axs_dict[f'Pij_matrix_{k}'] = ax + self.figs[f'Pij_matrix_{k}'] = fig + self.plot_dicts[f'Pij_matrix_{k}'] = { + 'plotfn': Pij_matrix_plofn, + 'ax_id': f'Pij_matrix_{k}', + 'Pij': self.qoi['Pij_matrix'][k], + 'timestamp': self.timestamp, + } + + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', False): + self.save_figures( + 
def defect_rate_k_plotfn(
    Rounds,
    defect_rate,
    defect_rate_ps,
    Population,
    timestamp,
    qubit,
    data_qubits,
    experiments,
    ax, **kw):
    """Plot per-kernel defect rates (solid: raw, dashed: post-selected)
    alongside leakage populations of the ancilla and its data qubits.
    All panels are drawn on the axes already attached to *ax*'s figure."""
    fig = ax.get_figure()
    axs = fig.get_axes()

    n_rounds = Rounds[-1]
    round_axis = np.arange(1, n_rounds + 1)
    last_key = f'{Rounds[-1]}_R'
    for k in defect_rate:
        # The first round carries no defect information and is skipped.
        axs[0].plot(round_axis[1:], defect_rate[k][last_key][1:],
                    f'C{k}-', label=experiments[k])
        axs[0].plot(round_axis[1:], defect_rate_ps[k][last_key][1:],
                    f'C{k}--')
    axs[0].grid(ls='--')
    axs[0].set_ylabel('error probability')
    axs[0].set_xlabel('rounds')
    axs[0].set_title('defect rate')
    axs[0].set_ylim(0, .5)
    axs[0].set_yticks([0, .1, .2, .3, .4, .5])
    axs[0].set_yticks([0.05, .15, .25, .35, .45], minor=True)

    # Ancilla leakage population vs round number.
    for k in defect_rate:
        axs[1].plot(round_axis, Population[k][qubit]*100,
                    f'C{k}-', label=experiments[k])
    axs[1].set_ylabel(r'Leakage population (%)')
    axs[1].set_xlabel('rounds')
    axs[1].set_title(qubit)
    axs[1].grid(ls='--')

    # One panel per data qubit.
    for i, q in enumerate(data_qubits):
        for k in Population:
            axs[2+i].plot(Rounds, (Population[k][q])*100,
                          f'C{k}.-', label=experiments[k])
        axs[2+i].set_xlabel('rounds')
        axs[2+i].set_title(q)
        axs[2+i].grid(ls='--')

    axs[-1].legend(frameon=False, bbox_to_anchor=(1.1, 1))
    fig.suptitle(f'{timestamp}\n{qubit} repeated stabilizer experiment')
    fig.tight_layout()
(np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2) + b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2) + c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2) + g = amplitude*np.exp( - (a*((x-x0)**2) + 2*b*(x-x0)*(y-y0) + + c*((y-y0)**2))) + return g.ravel() + def _fit_2D_gaussian(X, Y): + counts, _x, _y = np.histogram2d(X, Y, bins=[100, 100], density=True) + x = (_x[:-1] + _x[1:]) / 2 + y = (_y[:-1] + _y[1:]) / 2 + _x, _y = np.meshgrid(_x, _y) + x, y = np.meshgrid(x, y) + p0 = [counts.max(), np.mean(X), np.mean(Y), np.std(X), np.std(Y), 0] + popt, pcov = curve_fit(twoD_Gaussian, (x, y), counts.T.ravel(), p0=p0) + return popt + popt_0 = _fit_2D_gaussian(shots_0[:,0], shots_0[:,1]) + popt_1 = _fit_2D_gaussian(shots_1[:,0], shots_1[:,1]) + popt_2 = _fit_2D_gaussian(shots_2[:,0], shots_2[:,1]) + # Plot stuff + axs[0].plot(shots_0[:,0], shots_0[:,1], '.', color='C0', alpha=0.05) + axs[0].plot(shots_1[:,0], shots_1[:,1], '.', color='C3', alpha=0.05) + axs[0].plot(shots_2[:,0], shots_2[:,1], '.', color='C2', alpha=0.05) + axs[0].plot([0, popt_0[1]], [0, popt_0[2]], '--', color='k', lw=.5) + axs[0].plot([0, popt_1[1]], [0, popt_1[2]], '--', color='k', lw=.5) + axs[0].plot([0, popt_2[1]], [0, popt_2[2]], '--', color='k', lw=.5) + axs[0].plot(popt_0[1], popt_0[2], '.', color='C0', label='ground') + axs[0].plot(popt_1[1], popt_1[2], '.', color='C3', label='excited') + axs[0].plot(popt_2[1], popt_2[2], '.', color='C2', label='$2^\mathrm{nd}$ excited') + axs[0].plot(popt_0[1], popt_0[2], 'x', color='white') + axs[0].plot(popt_1[1], popt_1[2], 'x', color='white') + axs[0].plot(popt_2[1], popt_2[2], 'x', color='white') + # Draw 4sigma ellipse around mean + from matplotlib.patches import Ellipse + circle_0 = Ellipse((popt_0[1], popt_0[2]), + width=4*popt_0[3], height=4*popt_0[4], + angle=-popt_0[5]*180/np.pi, + ec='white', fc='none', ls='--', lw=1.25, zorder=10) + axs[0].add_patch(circle_0) + circle_1 = 
Ellipse((popt_1[1], popt_1[2]), + width=4*popt_1[3], height=4*popt_1[4], + angle=-popt_1[5]*180/np.pi, + ec='white', fc='none', ls='--', lw=1.25, zorder=10) + axs[0].add_patch(circle_1) + circle_2 = Ellipse((popt_2[1], popt_2[2]), + width=4*popt_2[3], height=4*popt_2[4], + angle=-popt_2[5]*180/np.pi, + ec='white', fc='none', ls='--', lw=1.25, zorder=10) + axs[0].add_patch(circle_2) + # Plot classifier zones + from matplotlib.patches import Polygon + _all_shots = np.concatenate((shots_0, shots_1, shots_2)) + _lim = np.max([ np.max(np.abs(_all_shots[:,0]))*1.1, np.max(np.abs(_all_shots[:,1]))*1.1 ]) + Lim_points = {} + for bound in ['01', '12', '02']: + dec_bounds['mean'] + _x0, _y0 = dec_bounds['mean'] + _x1, _y1 = dec_bounds[bound] + a = (_y1-_y0)/(_x1-_x0) + b = _y0 - a*_x0 + _xlim = 1e2*np.sign(_x1-_x0) + _ylim = a*_xlim + b + Lim_points[bound] = _xlim, _ylim + # Plot 0 area + _points = [dec_bounds['mean'], Lim_points['01'], Lim_points['02']] + _patch = Polygon(_points, color='C0', alpha=0.2, lw=0) + axs[0].add_patch(_patch) + # Plot 1 area + _points = [dec_bounds['mean'], Lim_points['01'], Lim_points['12']] + _patch = Polygon(_points, color='C3', alpha=0.2, lw=0) + axs[0].add_patch(_patch) + # Plot 2 area + _points = [dec_bounds['mean'], Lim_points['02'], Lim_points['12']] + _patch = Polygon(_points, color='C2', alpha=0.2, lw=0) + axs[0].add_patch(_patch) + # Plot decision boundary + for bound in ['01', '12', '02']: + _x0, _y0 = dec_bounds['mean'] + _x1, _y1 = Lim_points[bound] + axs[0].plot([_x0, _x1], [_y0, _y1], 'k--', lw=1) + axs[0].set_xlim(-_lim, _lim) + axs[0].set_ylim(-_lim, _lim) + axs[0].legend(frameon=False) + axs[0].set_xlabel('Integrated voltage I') + axs[0].set_ylabel('Integrated voltage Q') + axs[0].set_title(f'IQ plot qubit {qubit}') + fig.suptitle(f'{timestamp}\n') + ########################## + # Plot projections + ########################## + # 01 projection + _bin_c = projection_01['bin_centers'] + bin_width = _bin_c[1]-_bin_c[0] + 
axs[1].bar(_bin_c, projection_01['h0'], bin_width, fc='C0', alpha=0.4) + axs[1].bar(_bin_c, projection_01['h1'], bin_width, fc='C3', alpha=0.4) + axs[1].plot(_bin_c, double_gauss(_bin_c, *projection_01['popt0']), '-C0') + axs[1].plot(_bin_c, double_gauss(_bin_c, *projection_01['popt1']), '-C3') + axs[1].axvline(projection_01['threshold'], ls='--', color='k', lw=1) + text = '\n'.join((f'Fid. : {projection_01["Fid"]*100:.1f}%', + f'SNR : {projection_01["SNR"]:.1f}')) + props = dict(boxstyle='round', facecolor='gray', alpha=0) + axs[1].text(.775, .9, text, transform=axs[1].transAxes, + verticalalignment='top', bbox=props, fontsize=7) + axs[1].text(projection_01['popt0'][0], projection_01['popt0'][4]/2, + r'$|g\rangle$', ha='center', va='center', color='C0') + axs[1].text(projection_01['popt1'][0], projection_01['popt1'][4]/2, + r'$|e\rangle$', ha='center', va='center', color='C3') + axs[1].set_xticklabels([]) + axs[1].set_xlim(_bin_c[0], _bin_c[-1]) + axs[1].set_ylim(bottom=0) + axs[1].set_title('Projection of data') + # 12 projection + _bin_c = projection_12['bin_centers'] + bin_width = _bin_c[1]-_bin_c[0] + axs[2].bar(_bin_c, projection_12['h1'], bin_width, fc='C3', alpha=0.4) + axs[2].bar(_bin_c, projection_12['h2'], bin_width, fc='C2', alpha=0.4) + axs[2].plot(_bin_c, double_gauss(_bin_c, *projection_12['popt1']), '-C3') + axs[2].plot(_bin_c, double_gauss(_bin_c, *projection_12['popt2']), '-C2') + axs[2].axvline(projection_12['threshold'], ls='--', color='k', lw=1) + text = '\n'.join((f'Fid. 
: {projection_12["Fid"]*100:.1f}%', + f'SNR : {projection_12["SNR"]:.1f}')) + props = dict(boxstyle='round', facecolor='gray', alpha=0) + axs[2].text(.775, .9, text, transform=axs[2].transAxes, + verticalalignment='top', bbox=props, fontsize=7) + axs[2].text(projection_12['popt1'][0], projection_12['popt1'][4]/2, + r'$|e\rangle$', ha='center', va='center', color='C3') + axs[2].text(projection_12['popt2'][0], projection_12['popt2'][4]/2, + r'$|f\rangle$', ha='center', va='center', color='C2') + axs[2].set_xticklabels([]) + axs[2].set_xlim(_bin_c[0], _bin_c[-1]) + axs[2].set_ylim(bottom=0) + # 02 projection + _bin_c = projection_02['bin_centers'] + bin_width = _bin_c[1]-_bin_c[0] + axs[3].bar(_bin_c, projection_02['h0'], bin_width, fc='C0', alpha=0.4) + axs[3].bar(_bin_c, projection_02['h2'], bin_width, fc='C2', alpha=0.4) + axs[3].plot(_bin_c, double_gauss(_bin_c, *projection_02['popt0']), '-C0') + axs[3].plot(_bin_c, double_gauss(_bin_c, *projection_02['popt2']), '-C2') + axs[3].axvline(projection_02['threshold'], ls='--', color='k', lw=1) + text = '\n'.join((f'Fid. 
: {projection_02["Fid"]*100:.1f}%', + f'SNR : {projection_02["SNR"]:.1f}')) + props = dict(boxstyle='round', facecolor='gray', alpha=0) + axs[3].text(.775, .9, text, transform=axs[3].transAxes, + verticalalignment='top', bbox=props, fontsize=7) + axs[3].text(projection_02['popt0'][0], projection_02['popt0'][4]/2, + r'$|g\rangle$', ha='center', va='center', color='C0') + axs[3].text(projection_02['popt2'][0], projection_02['popt2'][4]/2, + r'$|f\rangle$', ha='center', va='center', color='C2') + axs[3].set_xticklabels([]) + axs[3].set_xlim(_bin_c[0], _bin_c[-1]) + axs[3].set_ylim(bottom=0) + axs[3].set_xlabel('Integrated voltage') + # Write fidelity textbox + text = '\n'.join(('Assignment fidelity:', + f'$F_g$ : {Fid_dict["0"]*100:.1f}%', + f'$F_e$ : {Fid_dict["1"]*100:.1f}%', + f'$F_f$ : {Fid_dict["2"]*100:.1f}%', + f'$F_\mathrm{"{avg}"}$ : {Fid_dict["avg"]*100:.1f}%')) + props = dict(boxstyle='round', facecolor='gray', alpha=.2) + axs[1].text(1.05, 1, text, transform=axs[1].transAxes, + verticalalignment='top', bbox=props) + +def Pij_matrix_plofn( + Pij, + timestamp, + ax, **kw): + fig = ax.get_figure() + # Plot matrix + im1 = ax.matshow(np.abs(Pij).T, cmap='Blues', vmin=0, vmax=None) + im2 = ax.matshow( np.abs(Pij), cmap='Reds', vmin=0, vmax=0.05) + # Set ticks + R = int(Pij.shape[0]/4) + ax.set_xticks(np.arange(0, 4*R, R)-.5) + ax.set_yticks(np.arange(0, 4*R, R)-.5) + from matplotlib.ticker import MultipleLocator + ax.xaxis.set_minor_locator(MultipleLocator(1)) + ax.yaxis.set_minor_locator(MultipleLocator(1)) + ax.set_xticklabels([]) + ax.set_yticklabels([]) + # Write qubit labels + _Ancilla_qubits_ordered = ['Z3', 'Z1', 'Z4', 'Z2'] + for i, q in enumerate(_Ancilla_qubits_ordered): + ax.text(i*R+(R-1)/2, -3.5, q, va='center', ha='center', size=12) + ax.text(-3.5, i*R+(R-1)/2, q, va='center', ha='center', size=12) + for i in range(R): + for j in range(4): + ax.text(-1.5, i+R*j, i, va='center', ha='center', size=5.5) + ax.text(i+R*j, -1.5, i, va='center', 
def Chevron(delta, t, g, delta_0, a, b, phi):
    '''
    Chevron oscillation model used for fitting.

    Args:
        delta : Detuning of qubit
        t : duration of pulse
        g : coupling of avoided crossing
        delta_0 : detuning at avoided crossing
        a : scale factor used for fitting
        b : offset factor used for fitting
        phi : phase offset used for fitting (this
              accounts for pulse distortion)
    '''
    two_pi = 2*np.pi
    # Work in angular frequencies.
    g_rad = g*two_pi
    detuning_rad = (delta - delta_0)*two_pi
    # Generalized Rabi frequency of the oscillation...
    Omega = np.sqrt(detuning_rad**2 + (2*g_rad)**2)
    # ...and its amplitude (1 on resonance, Lorentzian fall-off).
    osc_amp = (2*g_rad)**2/(detuning_rad**2 + (2*g_rad)**2)
    # Oscillating population, rescaled by the fit parameters.
    population = osc_amp*(1 - np.cos(Omega*t + phi))/2
    return a*population + b
self.raw_data_dict['folder'].split(' ')[-3] + self.QL = self.raw_data_dict['folder'].split(' ')[-2] + self.proc_data_dict = {} + # Sort data + Amps = np.unique(self.raw_data_dict['data'][:,0]) + Times = np.unique(self.raw_data_dict['data'][:,1]) + Pop_H = self.raw_data_dict['data'][:,2] + Pop_L = self.raw_data_dict['data'][:,3] + nx, ny = len(Amps), len(Times) + Pop_H = Pop_H.reshape(ny, nx) + Pop_L = Pop_L.reshape(ny, nx) + # Convert amplitude to detuning (frequency) + P_func = np.poly1d(self.Poly_coefs) + Out_voltage = Amps*self.DAC_amp*self.Out_range/2 + Detunings = P_func(Out_voltage) + # Fit Chevron + from scipy.optimize import curve_fit + def fit_func(xy, g, delta_0, a, b, phi): + delta, time = xy + outcome = Chevron(delta, time, g, delta_0, a, b, phi) + return outcome.ravel() + # perform fit + x, y = np.meshgrid(Detunings, Times) + z = Pop_L + # initial guess + idx_det0 = np.argmax(np.mean(z, axis=0)) + p0 = [11e6, # g + Detunings[idx_det0], # delta_0 + np.max(z)-np.min(z), # a + np.min(z), # b + 0, # phi + ] + popt, pcov = curve_fit(fit_func, (x,y), z.ravel(), p0=p0) + detuning_freq = popt[1] + detuning_amp = np.max((P_func-detuning_freq).roots) + T_p = abs((np.pi-popt[4])/(2*np.pi*popt[0])) + # Save data + self.proc_data_dict['Out_voltage'] = Out_voltage + self.proc_data_dict['Detunings'] = Detunings + self.proc_data_dict['Times'] = Times + self.proc_data_dict['Pop_H'] = Pop_H + self.proc_data_dict['Pop_L'] = Pop_L + self.proc_data_dict['Fit_params'] = popt + self.qoi = {'coupling': popt[0], + 'detuning_freq': detuning_freq, + 'detuning_amp': detuning_amp, + 'Tp': T_p} + + def prepare_plots(self): + self.axs_dict = {} + fig, axs = plt.subplots(figsize=(12*.8,5*.8), ncols=3, dpi=100) + self.figs[f'Chevron'] = fig + self.axs_dict[f'Chevron'] = axs[0] + self.plot_dicts[f'Chevron']={ + 'plotfn': Chevron_plotfn, + 'ax_id': f'Chevron', + 'Detunings' : self.proc_data_dict['Detunings'], + 'Out_voltage' : self.proc_data_dict['Out_voltage'], + 'Times' : 
self.proc_data_dict['Times'], + 'Pop_H' : self.proc_data_dict['Pop_H'], + 'Pop_L' : self.proc_data_dict['Pop_L'], + 'f0' : self.qoi['detuning_freq'], + 'a0' : self.qoi['detuning_amp'], + 'tp' : self.qoi['Tp'], + 'ts' : self.timestamp, + 'qH' : self.QH, 'qL' : self.QL, + 'qH_freq' : self.QH_freq, 'qL_det' : self.QL_det, + 'poly_coefs' : self.Poly_coefs, + 'avoided_crossing' : self.avoided_crossing, + 'Fit_params' : self.proc_data_dict['Fit_params'], + } + + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', False): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + tag_tstamp=self.options_dict.get('tag_tstamp', True)) + +def Chevron_plotfn( + ax, + qH, qL, + qH_freq, qL_det, + poly_coefs, + Detunings, + Out_voltage, + avoided_crossing, + Fit_params, + Times, + Pop_H, + Pop_L, + f0, a0, tp, + ts, **kw): + fig = ax.get_figure() + axs = fig.get_axes() + + # Avoided crossing plot + p_func = np.poly1d(poly_coefs) + + Voltage_axis = np.linspace(0, Out_voltage[-1]*1.2, 201) + Frequency_axis = qH_freq-p_func(Voltage_axis) + axs[2].plot(Voltage_axis, Frequency_axis*1e-9, 'C0-') + if qL_det != 0 : + axs[2].axhline([(qH_freq-f0+qL_det)*1e-9], color='k', ls='--', alpha=.25) + axs[2].axhline([(qH_freq-f0)*1e-9], color='k', ls='--') + axs[2].text((Voltage_axis[0]+(Voltage_axis[-1]-Voltage_axis[0])*.02), + (qH_freq-f0+(Frequency_axis[-1]-Frequency_axis[0])*.03)*1e-9, + f'$f_{{{avoided_crossing}}}$', color='k', size=12, va='top') + axs[2].plot([a0], [(qH_freq-f0)*1e-9], 'C3.') + axs[2].set_xlabel('Output voltage (V)') + axs[2].set_ylabel(f'{qH} frequency (GHz)') + axs[2].set_xlim(Voltage_axis[0], Voltage_axis[-1]) + axs[2].set_title('Frequency scheme') + # axt = axs[2].twinx() + + # Chevrons plot + def get_plot_axis(vals, rang=None): + dx = vals[1]-vals[0] + X = np.concatenate((vals, [vals[-1]+dx])) - dx/2 + return X + 
Detunings = get_plot_axis(Detunings) + Times = get_plot_axis(Times) + # High frequency qubit population + axs[0].pcolormesh(Detunings*1e-6, Times*1e9, Pop_H) + axs[0].set_xlabel(f'{qH} detuning (MHz)') + axs[0].set_ylabel('Duration (ns)') + axs[0].set_title(f'Population {qH}') + axs[0].axvline(f0*1e-6, color='w', ls='--') + axs[0].axhline(tp/2*1e9, color='w', ls='--') + axs[0].plot([f0*1e-6], [tp/2*1e9], 'C3.') + axt0 = axs[0].twiny() + axt0.set_xlim((qH_freq*1e-6-np.array(axs[0].get_xlim()))*1e-3) + axt0.set_xlabel(f'{qH} Frequency (GHz)') + # Low frequency qubit population + axs[1].pcolormesh(Detunings*1e-6, Times*1e9, Pop_L) + axs[1].set_xlabel(f'{qH} detuning (MHz)') + axs[1].axvline(f0*1e-6, color='w', ls='--') + axs[1].axhline(tp/2*1e9, color='w', ls='--') + axs[1].plot([f0*1e-6], [tp/2*1e9], 'C3.') + axs[1].text((Detunings[0]+(Detunings[-1]-Detunings[0])*.02)*1e-6, (tp/2+(Times[-1]-Times[0])*.03)*1e9, + f'$t_p/2={tp/2*1e9:.2f}$ ns', color='w', size=12) + axs[1].text((Detunings[0]+(Detunings[-1]-Detunings[0])*.02)*1e-6, (Times[0]+(Times[-1]-Times[0])*.03)*1e9, + f'$\\Delta={f0*1e-6:.2f}$ MHz', color='w', size=12) + axs[1].text((Detunings[0]+(Detunings[-1]-Detunings[0])*.02)*1e-6, (Times[-1]-(Times[-1]-Times[0])*.03)*1e9, + f'$J_2={Fit_params[0]*1e-6:.2f}$ MHz', color='w', size=12, va='top') + axs[1].set_title(f'Population {qL}') + axt1 = axs[1].twiny() + axt1.set_xlim((qH_freq*1e-6-np.array(axs[1].get_xlim()))*1e-3) + axt1.set_xlabel(f'{qH} Frequency (GHz)') + # Add Chevron fit contours + X = np.linspace(Detunings[0], Detunings[-1], 201) + Y = np.linspace(Times[0], Times[-1], 201) + _X, _Y = np.meshgrid(X, Y) + Z = Chevron(_X, _Y, *Fit_params) + Z = (Z - np.min(Z))/(np.max(Z)-np.min(Z)) + for c_lvl, alpha in zip([.05, .2, .5], [.1, .2, .5]): + axs[0].contour(X*1e-6, Y*1e9, Z, [c_lvl], colors=['w'], + linewidths=[1], linestyles=['--'], alpha=alpha) + axs[1].contour(X*1e-6, Y*1e9, Z, [c_lvl], colors=['w'], + linewidths=[1], linestyles=['--'], alpha=alpha) + + 
class TLS_landscape_Analysis(ba.BaseDataAnalysis):
    """
    Analysis for TLS landscape: qubit population versus flux-pulse
    detuning and duration.
    """
    def __init__(self,
                 Q_freq: float,
                 Poly_coefs: float,
                 Out_range: float = 5,
                 DAC_amp: float = 0.5,
                 interaction_freqs: dict = None,
                 t_start: str = None,
                 t_stop: str = None,
                 label: str = '',
                 options_dict: dict = None,
                 extract_only: bool = False,
                 auto=True):
        super().__init__(t_start=t_start,
                         t_stop=t_stop,
                         label=label,
                         options_dict=options_dict,
                         extract_only=extract_only)
        # Flux-arc polynomial and output scaling used to convert pulse
        # amplitude into frequency detuning.
        self.Poly_coefs = Poly_coefs
        self.Out_range = Out_range
        self.DAC_amp = DAC_amp
        self.Q_freq = Q_freq
        self.interaction_freqs = interaction_freqs
        if auto:
            self.run_analysis()

    def extract_data(self):
        """Load the raw dataset of the latest matching timestamp."""
        self.get_timestamps()
        self.timestamp = self.timestamps[0]
        data_fp = get_datafilepath_from_timestamp(self.timestamp)
        spec = {'data': ('Experimental Data/Data', 'dset'),
                'value_names': ('Experimental Data', 'attr:value_names')}
        self.raw_data_dict = h5d.extract_pars_from_datafile(data_fp, spec)
        # Fields required by the base-analysis machinery.
        self.raw_data_dict['timestamps'] = self.timestamps
        self.raw_data_dict['folder'] = os.path.split(data_fp)[0]

    def process_data(self):
        # Qubit name is encoded in the measurement folder name.
        self.Q_name = self.raw_data_dict['folder'].split(' ')[-3]
        self.proc_data_dict = {}
        # Sort data: column 0 amplitude, column 1 time, column 2 population.
        raw = self.raw_data_dict['data']
        Amps = np.unique(raw[:, 0])
        Times = np.unique(raw[:, 1])
        Pop = raw[:, 2].reshape(len(Times), len(Amps))
        # Convert amplitude to detuning (frequency) via the flux arc.
        P_func = np.poly1d(self.Poly_coefs)
        Out_voltage = Amps*self.DAC_amp*self.Out_range/2
        Detunings = P_func(Out_voltage)
        self.proc_data_dict['Out_voltage'] = Out_voltage
        self.proc_data_dict['Detunings'] = Detunings
        self.proc_data_dict['Times'] = Times
        self.proc_data_dict['Pop'] = Pop

    def prepare_plots(self):
        self.axs_dict = {}
        fig, ax = plt.subplots(figsize=(10, 4), dpi=100)
        self.figs['TLS_landscape'] = fig
        self.axs_dict['TLS_landscape'] = ax
        self.plot_dicts['TLS_landscape'] = {
            'plotfn': TLS_landscape_plotfn,
            'ax_id': 'TLS_landscape',
            'Detunings': self.proc_data_dict['Detunings'],
            'Out_voltage': self.proc_data_dict['Out_voltage'],
            'Times': self.proc_data_dict['Times'],
            'Pop': self.proc_data_dict['Pop'],
            'Q_name': self.Q_name,
            'Q_freq': self.Q_freq,
            'interaction_freqs': self.interaction_freqs,
            'ts': self.timestamp,
        }

    def run_post_extract(self):
        self.prepare_plots()  # specify default plots
        self.plot(key_list='auto', axs_dict=self.axs_dict)  # make the plots
        if self.options_dict.get('save_figs', False):
            self.save_figures(
                close_figs=self.options_dict.get('close_figs', True),
                tag_tstamp=self.options_dict.get('tag_tstamp', True))


def TLS_landscape_plotfn(
        ax,
        Q_name,
        Q_freq,
        Detunings,
        Out_voltage,
        Times,
        Pop,
        ts,
        interaction_freqs=None,
        **kw):
    """Plot the TLS landscape (population vs detuning and duration) with
    markers for known two-qubit-gate interaction frequencies."""
    fig = ax.get_figure()

    def _to_cell_edges(vals):
        # Convert bin centers to pcolormesh cell edges.
        if len(vals) > 1:
            dx = vals[1] - vals[0]
            return np.concatenate((vals, [vals[-1]+dx])) - dx/2
        return vals

    Detunings = _to_cell_edges(Detunings)
    Times = _to_cell_edges(Times)
    # NOTE(review): a data-dependent vmax is computed here but the
    # hard-coded vmax=1 below overrides it (kept to match original).
    vmax = min([1, np.max(Pop)])
    vmax = max([vmax, 0.15])
    im = ax.pcolormesh(Detunings*1e-6, Times*1e9, Pop, vmax=1)
    fig.colorbar(im, ax=ax, label='Population')
    # Mark two-qubit gate frequencies, when provided.
    if interaction_freqs:
        for gate, freq in interaction_freqs.items():
            if freq > 10e6:
                ax.axvline(freq*1e-6, color='w', ls='--')
                ax.text(freq*1e-6, np.mean(Times)*1e9,
                        f'CZ {gate}', va='center', ha='right',
                        color='w', rotation=90)
    ax.set_xlabel(f'{Q_name} detuning (MHz)')
    ax.set_ylabel('Duration (ns)')
    ax.set_title(f'Population {Q_name}')
    # Secondary x-axis in absolute qubit frequency.
    axt0 = ax.twiny()
    axt0.set_xlim((Q_freq*1e-6 - np.array(ax.get_xlim()))*1e-3)
    axt0.set_xlabel(f'{Q_name} Frequency (GHz)')
    fig.suptitle(f'{ts}\nTLS landscape {Q_name}', y=.95)
    fig.tight_layout()
_rotate_and_center_data(I, Q, vec0, vec1, phi=0): + vector = vec1-vec0 + angle = np.arctan(vector[1]/vector[0]) + rot_matrix = np.array([[ np.cos(-angle+phi),-np.sin(-angle+phi)], + [ np.sin(-angle+phi), np.cos(-angle+phi)]]) + proc = np.array((I, Q)) + proc = np.dot(rot_matrix, proc) + return proc.transpose() + +def _calculate_fid_and_threshold(x0, n0, x1, n1): + """ + Calculate fidelity and threshold from histogram data: + x0, n0 is the histogram data of shots 0 (value and occurences), + x1, n1 is the histogram data of shots 1 (value and occurences). + """ + # Build cumulative histograms of shots 0 + # and 1 in common bins by interpolation. + all_x = np.unique(np.sort(np.concatenate((x0, x1)))) + cumsum0, cumsum1 = np.cumsum(n0), np.cumsum(n1) + ecumsum0 = np.interp(x=all_x, xp=x0, fp=cumsum0, left=0) + necumsum0 = ecumsum0/np.max(ecumsum0) + ecumsum1 = np.interp(x=all_x, xp=x1, fp=cumsum1, left=0) + necumsum1 = ecumsum1/np.max(ecumsum1) + # Calculate optimal threshold and fidelity + F_vs_th = (1-(1-abs(necumsum0 - necumsum1))/2) + opt_idxs = np.argwhere(F_vs_th == np.amax(F_vs_th)) + opt_idx = int(round(np.average(opt_idxs))) + F_assignment_raw = F_vs_th[opt_idx] + threshold_raw = all_x[opt_idx] + return F_assignment_raw, threshold_raw + +def _fit_double_gauss(x_vals, hist_0, hist_1, + _x0_guess=None, _x1_guess=None): + ''' + Fit two histograms to a double gaussian with + common parameters. From fitted parameters, + calculate SNR, Pe0, Pg1, Teff, Ffit and Fdiscr. 
+ ''' + from scipy.optimize import curve_fit + # Double gaussian model for fitting + def _gauss_pdf(x, x0, sigma): + return np.exp(-((x-x0)/sigma)**2/2) + global double_gauss + def double_gauss(x, x0, x1, sigma0, sigma1, A, r): + _dist0 = A*( (1-r)*_gauss_pdf(x, x0, sigma0) + r*_gauss_pdf(x, x1, sigma1) ) + return _dist0 + # helper function to simultaneously fit both histograms with common parameters + def _double_gauss_joint(x, x0, x1, sigma0, sigma1, A0, A1, r0, r1): + _dist0 = double_gauss(x, x0, x1, sigma0, sigma1, A0, r0) + _dist1 = double_gauss(x, x1, x0, sigma1, sigma0, A1, r1) + return np.concatenate((_dist0, _dist1)) + # Guess for fit + pdf_0 = hist_0/np.sum(hist_0) # Get prob. distribution + pdf_1 = hist_1/np.sum(hist_1) # + if _x0_guess == None: + _x0_guess = np.sum(x_vals*pdf_0) # calculate mean + if _x1_guess == None: + _x1_guess = np.sum(x_vals*pdf_1) # + _sigma0_guess = np.sqrt(np.sum((x_vals-_x0_guess)**2*pdf_0)) # calculate std + _sigma1_guess = np.sqrt(np.sum((x_vals-_x1_guess)**2*pdf_1)) # + _r0_guess = 0.01 + _r1_guess = 0.05 + _A0_guess = np.max(hist_0) + _A1_guess = np.max(hist_1) + p0 = [_x0_guess, _x1_guess, _sigma0_guess, _sigma1_guess, _A0_guess, _A1_guess, _r0_guess, _r1_guess] + # Bounding parameters + _x0_bound = (-np.inf,np.inf) + _x1_bound = (-np.inf,np.inf) + _sigma0_bound = (0,np.inf) + _sigma1_bound = (0,np.inf) + _r0_bound = (0,1) + _r1_bound = (0,1) + _A0_bound = (0,np.inf) + _A1_bound = (0,np.inf) + bounds = np.array([_x0_bound, _x1_bound, _sigma0_bound, _sigma1_bound, _A0_bound, _A1_bound, _r0_bound, _r1_bound]) + # Fit parameters within bounds + popt, pcov = curve_fit( + _double_gauss_joint, x_vals, + np.concatenate((hist_0, hist_1)), + p0=p0, bounds=bounds.transpose()) + popt0 = popt[[0,1,2,3,4,6]] + popt1 = popt[[1,0,3,2,5,7]] + # Calculate quantities of interest + SNR = abs(popt0[0] - popt1[0])/((abs(popt0[2])+abs(popt1[2]))/2) + P_e0 = popt0[5]*popt0[2]/(popt0[2]*popt0[5] + popt0[3]*(1-popt0[5])) + P_g1 = 
popt1[5]*popt1[2]/(popt1[2]*popt1[5] + popt1[3]*(1-popt1[5])) + # Fidelity from fit + _range = x_vals[0], x_vals[-1] + _x_data = np.linspace(*_range, 10001) + _h0 = double_gauss(_x_data, *popt0)# compute distrubition from + _h1 = double_gauss(_x_data, *popt1)# fitted parameters. + Fid_fit, threshold_fit = _calculate_fid_and_threshold(_x_data, _h0, _x_data, _h1) + # Discrimination fidelity + _h0 = double_gauss(_x_data, *popt0[:-1], 0)# compute distrubition without residual + _h1 = double_gauss(_x_data, *popt1[:-1], 0)# excitation of relaxation. + Fid_discr, threshold_discr = _calculate_fid_and_threshold(_x_data, _h0, _x_data, _h1) + # return results + qoi = { 'SNR': SNR, + 'P_e0': P_e0, 'P_g1': P_g1, + 'Fid_fit': Fid_fit, 'Fid_discr': Fid_discr } + return popt0, popt1, qoi + +def _decision_boundary_points(coefs, intercepts): + ''' + Find points along the decision boundaries of + LinearDiscriminantAnalysis (LDA). + This is performed by finding the interception + of the bounds of LDA. For LDA, these bounds are + encoded in the coef_ and intercept_ parameters + of the classifier. + Each bound is given by the equation: + y + coef_i[0]/coef_i[1]*x + intercept_i = 0 + Note this only works for LinearDiscriminantAnalysis. + Other classifiers might have diferent bound models. + ''' + points = {} + # Cycle through model coeficients + # and intercepts. + for i, j in [[0,1], [1,2], [0,2]]: + c_i = coefs[i] + int_i = intercepts[i] + c_j = coefs[j] + int_j = intercepts[j] + x = (- int_j/c_j[1] + int_i/c_i[1])/(-c_i[0]/c_i[1] + c_j[0]/c_j[1]) + y = -c_i[0]/c_i[1]*x - int_i/c_i[1] + points[f'{i}{j}'] = (x, y) + # Find mean point + points['mean'] = np.mean([ [x, y] for (x, y) in points.values()], axis=0) + return points + +class Repeated_CZ_experiment_Analysis(ba.BaseDataAnalysis): + """ + Analysis for LRU experiment. 
+ """ + def __init__(self, + rounds: int, + heralded_init: bool = False, + t_start: str = None, + t_stop: str = None, + label: str = '', + options_dict: dict = None, + extract_only: bool = False, + auto=True + ): + + super().__init__(t_start=t_start, t_stop=t_stop, + label=label, + options_dict=options_dict, + extract_only=extract_only) + + self.rounds = rounds + self.heralded_init = heralded_init + if auto: + self.run_analysis() + + def extract_data(self): + """ + This is a new style (sept 2019) data extraction. + This could at some point move to a higher level class. + """ + self.get_timestamps() + self.timestamp = self.timestamps[0] + data_fp = get_datafilepath_from_timestamp(self.timestamp) + param_spec = {'data': ('Experimental Data/Data', 'dset'), + 'value_names': ('Experimental Data', 'attr:value_names')} + self.raw_data_dict = h5d.extract_pars_from_datafile( + data_fp, param_spec) + # Parts added to be compatible with base analysis data requirements + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] + + def process_data(self): + ###################################### + # Sort shots and assign them + ###################################### + _cycle = self.rounds*2 + 3**2 + # Get qubit names in channel order + names = [ name.decode().split(' ')[-2] for name in self.raw_data_dict['value_names'] ] + self.Qubits = names[::2] + # Dictionary that will store raw shots + # so that they can later be sorted. 
    def process_data(self):
        """
        Sort raw shots per qubit, build qutrit classifiers and assignment
        matrices, project shots along decision boundaries, and compute
        readout-corrected population vectors and leakage/seepage fits for
        the repeated-CZ experiment.
        """
        ######################################
        # Sort shots and assign them
        ######################################
        # Each experiment cycle contains 2*rounds experiment shots followed
        # by 3**2 = 9 two-qutrit calibration points.
        _cycle = self.rounds*2 + 3**2
        # Get qubit names in channel order
        names = [ name.decode().split(' ')[-2] for name in self.raw_data_dict['value_names'] ]
        self.Qubits = names[::2]
        # Dictionary that will store raw shots
        # so that they can later be sorted.
        raw_shots = {q: {} for q in self.Qubits}
        for q_idx, qubit in enumerate(self.Qubits):
            self.proc_data_dict[qubit] = {}
            # I/Q channels for this qubit (column 0 is assumed to be the
            # sweep/index column — TODO confirm against the data file).
            _ch_I, _ch_Q = 2*q_idx+1, 2*q_idx+2
            _raw_shots = self.raw_data_dict['data'][:,[_ch_I, _ch_Q]]
            # Calibration shots for |0>, |1>, |2> of this qubit within the cycle.
            _shots_0 = _raw_shots[2*self.rounds+0::_cycle]
            _shots_1 = _raw_shots[2*self.rounds+4::_cycle]
            _shots_2 = _raw_shots[2*self.rounds+8::_cycle]
            # Rotate data
            center_0 = np.array([np.mean(_shots_0[:,0]), np.mean(_shots_0[:,1])])
            center_1 = np.array([np.mean(_shots_1[:,0]), np.mean(_shots_1[:,1])])
            center_2 = np.array([np.mean(_shots_2[:,0]), np.mean(_shots_2[:,1])])
            raw_shots[qubit] = _rotate_and_center_data(_raw_shots[:,0], _raw_shots[:,1], center_0, center_1)
            # Sort different combinations of input states
            states = ['0','1', '2']
            combinations = [''.join(s) for s in itertools.product(states, repeat=2)]
            self.combinations = combinations
            Shots_state = {}
            for i, comb in enumerate(combinations):
                Shots_state[comb] = raw_shots[qubit][2*self.rounds+i::_cycle]
            # Group calibration shots by this qubit's own prepared state.
            Shots_0 = np.vstack([Shots_state[comb] for comb in combinations if comb[q_idx]=='0'])
            Shots_1 = np.vstack([Shots_state[comb] for comb in combinations if comb[q_idx]=='1'])
            Shots_2 = np.vstack([Shots_state[comb] for comb in combinations if comb[q_idx]=='2'])
            self.proc_data_dict[qubit]['Shots_0'] = Shots_0
            self.proc_data_dict[qubit]['Shots_1'] = Shots_1
            self.proc_data_dict[qubit]['Shots_2'] = Shots_2
            self.proc_data_dict[qubit]['Shots_state'] = Shots_state
            # Use classifier for data
            data = np.concatenate((Shots_0, Shots_1, Shots_2))
            labels = [0 for s in Shots_0]+[1 for s in Shots_1]+[2 for s in Shots_2]
            from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
            clf = LinearDiscriminantAnalysis()
            clf.fit(data, labels)
            dec_bounds = _decision_boundary_points(clf.coef_, clf.intercept_)
            Fid_dict = {}
            for state, shots in zip([ '0', '1', '2'],
                                    [Shots_0, Shots_1, Shots_2]):
                _res = clf.predict(shots)
                _fid = np.mean(_res == int(state))
                Fid_dict[state] = _fid
            Fid_dict['avg'] = np.mean([f for f in Fid_dict.values()])
            # Get assignment fidelity matrix
            M = np.zeros((3,3))
            for i, shots in enumerate([Shots_0, Shots_1, Shots_2]):
                for j, state in enumerate(['0', '1', '2']):
                    _res = clf.predict(shots)
                    M[i][j] = np.mean(_res == int(state))
            self.proc_data_dict[qubit]['dec_bounds'] = dec_bounds
            self.proc_data_dict[qubit]['classifier'] = clf
            self.proc_data_dict[qubit]['Fid_dict'] = Fid_dict
            self.proc_data_dict[qubit]['Assignment_matrix'] = M
            #########################################
            # Project data along axis perpendicular
            # to the decision boundaries.
            #########################################
            ############################
            # Projection along 01 axis.
            ############################
            # Rotate shots over 01 axis
            shots_0 = _rotate_and_center_data(Shots_0[:,0],Shots_0[:,1],dec_bounds['mean'],dec_bounds['01'],phi=np.pi/2)
            shots_1 = _rotate_and_center_data(Shots_1[:,0],Shots_1[:,1],dec_bounds['mean'],dec_bounds['01'],phi=np.pi/2)
            # Take relavant quadrature
            shots_0 = shots_0[:,0]
            shots_1 = shots_1[:,0]
            n_shots_1 = len(shots_1)
            # find range
            _all_shots = np.concatenate((shots_0, shots_1))
            _range = (np.min(_all_shots), np.max(_all_shots))
            # Sort shots in unique values
            x0, n0 = np.unique(shots_0, return_counts=True)
            x1, n1 = np.unique(shots_1, return_counts=True)
            Fid_01, threshold_01 = _calculate_fid_and_threshold(x0, n0, x1, n1)
            # Histogram of shots for 1 and 2
            h0, bin_edges = np.histogram(shots_0, bins=100, range=_range)
            h1, bin_edges = np.histogram(shots_1, bins=100, range=_range)
            bin_centers = (bin_edges[1:]+bin_edges[:-1])/2
            popt0, popt1, params_01 = _fit_double_gauss(bin_centers, h0, h1)
            # Save processed data
            self.proc_data_dict[qubit]['projection_01'] = {}
            self.proc_data_dict[qubit]['projection_01']['h0'] = h0
            self.proc_data_dict[qubit]['projection_01']['h1'] = h1
            self.proc_data_dict[qubit]['projection_01']['bin_centers'] = bin_centers
            self.proc_data_dict[qubit]['projection_01']['popt0'] = popt0
            self.proc_data_dict[qubit]['projection_01']['popt1'] = popt1
            self.proc_data_dict[qubit]['projection_01']['SNR'] = params_01['SNR']
            self.proc_data_dict[qubit]['projection_01']['Fid'] = Fid_01
            self.proc_data_dict[qubit]['projection_01']['threshold'] = threshold_01
            ############################
            # Projection along 12 axis.
            ############################
            # Rotate shots over 12 axis
            shots_1 = _rotate_and_center_data(Shots_1[:,0],Shots_1[:,1],dec_bounds['mean'], dec_bounds['12'], phi=np.pi/2)
            shots_2 = _rotate_and_center_data(Shots_2[:,0],Shots_2[:,1],dec_bounds['mean'], dec_bounds['12'], phi=np.pi/2)
            # Take relavant quadrature
            shots_1 = shots_1[:,0]
            shots_2 = shots_2[:,0]
            n_shots_2 = len(shots_2)
            # find range
            _all_shots = np.concatenate((shots_1, shots_2))
            _range = (np.min(_all_shots), np.max(_all_shots))
            # Sort shots in unique values
            x1, n1 = np.unique(shots_1, return_counts=True)
            x2, n2 = np.unique(shots_2, return_counts=True)
            Fid_12, threshold_12 = _calculate_fid_and_threshold(x1, n1, x2, n2)
            # Histogram of shots for 1 and 2
            h1, bin_edges = np.histogram(shots_1, bins=100, range=_range)
            h2, bin_edges = np.histogram(shots_2, bins=100, range=_range)
            bin_centers = (bin_edges[1:]+bin_edges[:-1])/2
            popt1, popt2, params_12 = _fit_double_gauss(bin_centers, h1, h2)
            # Save processed data
            self.proc_data_dict[qubit]['projection_12'] = {}
            self.proc_data_dict[qubit]['projection_12']['h1'] = h1
            self.proc_data_dict[qubit]['projection_12']['h2'] = h2
            self.proc_data_dict[qubit]['projection_12']['bin_centers'] = bin_centers
            self.proc_data_dict[qubit]['projection_12']['popt1'] = popt1
            self.proc_data_dict[qubit]['projection_12']['popt2'] = popt2
            self.proc_data_dict[qubit]['projection_12']['SNR'] = params_12['SNR']
            self.proc_data_dict[qubit]['projection_12']['Fid'] = Fid_12
            self.proc_data_dict[qubit]['projection_12']['threshold'] = threshold_12
            ############################
            # Projection along 02 axis.
            ############################
            # Rotate shots over 02 axis
            shots_0 = _rotate_and_center_data(Shots_0[:,0],Shots_0[:,1],dec_bounds['mean'],dec_bounds['02'], phi=np.pi/2)
            shots_2 = _rotate_and_center_data(Shots_2[:,0],Shots_2[:,1],dec_bounds['mean'],dec_bounds['02'], phi=np.pi/2)
            # Take relavant quadrature
            shots_0 = shots_0[:,0]
            shots_2 = shots_2[:,0]
            n_shots_2 = len(shots_2)
            # find range
            _all_shots = np.concatenate((shots_0, shots_2))
            _range = (np.min(_all_shots), np.max(_all_shots))
            # Sort shots in unique values
            x0, n0 = np.unique(shots_0, return_counts=True)
            x2, n2 = np.unique(shots_2, return_counts=True)
            Fid_02, threshold_02 = _calculate_fid_and_threshold(x0, n0, x2, n2)
            # Histogram of shots for 1 and 2
            h0, bin_edges = np.histogram(shots_0, bins=100, range=_range)
            h2, bin_edges = np.histogram(shots_2, bins=100, range=_range)
            bin_centers = (bin_edges[1:]+bin_edges[:-1])/2
            popt0, popt2, params_02 = _fit_double_gauss(bin_centers, h0, h2)
            # Save processed data
            self.proc_data_dict[qubit]['projection_02'] = {}
            self.proc_data_dict[qubit]['projection_02']['h0'] = h0
            self.proc_data_dict[qubit]['projection_02']['h2'] = h2
            self.proc_data_dict[qubit]['projection_02']['bin_centers'] = bin_centers
            self.proc_data_dict[qubit]['projection_02']['popt0'] = popt0
            self.proc_data_dict[qubit]['projection_02']['popt2'] = popt2
            self.proc_data_dict[qubit]['projection_02']['SNR'] = params_02['SNR']
            self.proc_data_dict[qubit]['projection_02']['Fid'] = Fid_02
            self.proc_data_dict[qubit]['projection_02']['threshold'] = threshold_02
        ############################################
        # Calculate Mux assignment fidelity matrix #
        ############################################
        # Get assignment fidelity matrix
        M = np.zeros((9,9))
        states = ['0','1', '2']
        combinations = [''.join(s) for s in itertools.product(states, repeat=2)]
        # Calculate population vector for each input state
        for i, comb in enumerate(combinations):
            _res = []
            # Assign shots for each qubit
            for q in self.Qubits:
                _clf = self.proc_data_dict[q]['classifier']
                _res.append(_clf.predict(self.proc_data_dict[q]['Shots_state'][comb]).astype(str))
            # holds the outcome of shots for each qubit
            res = np.array(_res).T
            # NOTE(review): the inner loop reuses the name `comb`, shadowing
            # the outer loop variable — kept as-is (behavior unchanged).
            for j, comb in enumerate(combinations):
                M[i][j] = np.mean(np.logical_and(*(res == list(comb)).T))
        self.proc_data_dict['Mux_assignment_matrix'] = M
        ##############################
        # Analyze experimental shots #
        ##############################
        self.raw_shots = raw_shots
        _shots_ref = {}
        _shots_exp = {}
        for q in self.Qubits:
            _clf = self.proc_data_dict[q]['classifier']
            _shots_ref[q] = np.array([ _clf.predict(self.raw_shots[q][i+self.rounds::_cycle]) for i in range(self.rounds) ])
            _shots_exp[q] = np.array([ _clf.predict(self.raw_shots[q][i::_cycle]) for i in range(self.rounds) ])
            # convert to string
            _shots_ref[q] = _shots_ref[q].astype(str)
            _shots_exp[q] = _shots_exp[q].astype(str)
        # Concatenate strings of different outcomes
        Shots_ref = _shots_ref[self.Qubits[0]]
        Shots_exp = _shots_exp[self.Qubits[0]]
        for q in self.Qubits[1:]:
            Shots_ref = np.char.add(Shots_ref, _shots_ref[q])
            Shots_exp = np.char.add(Shots_exp, _shots_exp[q])
        '''
        Shots_ref and Shots_exp is an array
        of shape (rounds, n_shots) — original text garbled; presumably
        (rounds, shots-per-round) from the slicing above.
        We will use them to calculate the
        population vector at each round.
        '''
        Pop_vec_exp = np.zeros((self.rounds, len(combinations)))
        Pop_vec_ref = np.zeros((self.rounds, len(combinations)))
        for i in range(self.rounds):
            Pop_vec_ref[i] = [np.mean(Shots_ref[i]==comb) for comb in combinations]
            Pop_vec_exp[i] = [np.mean(Shots_exp[i]==comb) for comb in combinations]
        # Apply readout corrections
        M = self.proc_data_dict['Mux_assignment_matrix']
        M_inv = np.linalg.inv(M)
        Pop_vec_ref = np.dot(Pop_vec_ref, M_inv)
        Pop_vec_exp = np.dot(Pop_vec_exp, M_inv)
        self.proc_data_dict['Pop_vec_ref'] = Pop_vec_ref
        self.proc_data_dict['Pop_vec_exp'] = Pop_vec_exp
        # Calculate 2-qubit leakage probability
        _leak_idxs = np.where([ '2' in comb for comb in combinations])[0]
        P_leak_ref = np.sum(Pop_vec_ref[:,_leak_idxs], axis=1)
        P_leak_exp = np.sum(Pop_vec_exp[:,_leak_idxs], axis=1)
        self.proc_data_dict['P_leak_ref'] = P_leak_ref
        self.proc_data_dict['P_leak_exp'] = P_leak_exp
        # Fit leakage and seepage
        from scipy.optimize import curve_fit
        # Steady-state leakage accumulation model: L = leakage rate,
        # S = seepage rate per round.
        def func(n, L, S):
            return (1-np.exp(-n*(L+S)))*L/(L+S)
        _x = np.arange(self.rounds+1)
        _y = [0]+list(self.proc_data_dict['P_leak_ref'])
        p0 = [.1, .1]
        popt_ref, pcov_ref = curve_fit(func, _x, _y, p0=p0, bounds=((0,0), (1,1)))
        _y = [0]+list(self.proc_data_dict['P_leak_exp'])
        popt_exp, pcov_exp = curve_fit(func, _x, _y, p0=p0, bounds=((0,0), (1,1)))
        self.proc_data_dict['fit_ref'] = popt_ref, pcov_ref
        self.proc_data_dict['fit_exp'] = popt_exp, pcov_exp
        # Calculate individual leakage probability
        for i, q in enumerate(self.Qubits):
            _leak_idxs = np.where([ '2' == comb[i] for comb in combinations])[0]
            P_leak_ref = np.sum(Pop_vec_ref[:,_leak_idxs], axis=1)
            P_leak_exp = np.sum(Pop_vec_exp[:,_leak_idxs], axis=1)
            self.proc_data_dict[f'P_leak_ref_{q}'] = P_leak_ref
            self.proc_data_dict[f'P_leak_exp_{q}'] = P_leak_exp
            # Fit leakage and seepage rates
            _x = np.arange(self.rounds)
            _y = list(self.proc_data_dict[f'P_leak_ref_{q}'])
            p0 = [.1, .1]
            popt_ref, pcov_ref = curve_fit(func, _x, _y, p0=p0, bounds=((0,0), (1,1)))
            _y = list(self.proc_data_dict[f'P_leak_exp_{q}'])
            popt_exp, pcov_exp = curve_fit(func, _x, _y, p0=p0, bounds=((0,0), (1,1)))
            self.proc_data_dict[f'fit_ref_{q}'] = popt_ref, pcov_ref
            self.proc_data_dict[f'fit_exp_{q}'] = popt_exp, pcov_exp
    def prepare_plots(self):
        """
        Register plot dictionaries: per-qubit IQ histograms with projections,
        per-qubit qutrit assignment matrices, the multiplexed assignment
        matrix, and the population/leakage plot.
        """
        self.axs_dict = {}
        for qubit in self.Qubits:
            fig = plt.figure(figsize=(8,4), dpi=100)
            axs = [fig.add_subplot(121),
                   fig.add_subplot(322),
                   fig.add_subplot(324),
                   fig.add_subplot(326)]
            # fig.patch.set_alpha(0)
            self.axs_dict[f'IQ_readout_histogram_{qubit}'] = axs[0]
            self.figs[f'IQ_readout_histogram_{qubit}'] = fig
            self.plot_dicts[f'IQ_readout_histogram_{qubit}'] = {
                'plotfn': ssro_IQ_projection_plotfn,
                'ax_id': f'IQ_readout_histogram_{qubit}',
                'shots_0': self.proc_data_dict[qubit]['Shots_0'],
                'shots_1': self.proc_data_dict[qubit]['Shots_1'],
                'shots_2': self.proc_data_dict[qubit]['Shots_2'],
                'projection_01': self.proc_data_dict[qubit]['projection_01'],
                'projection_12': self.proc_data_dict[qubit]['projection_12'],
                'projection_02': self.proc_data_dict[qubit]['projection_02'],
                'classifier': self.proc_data_dict[qubit]['classifier'],
                'dec_bounds': self.proc_data_dict[qubit]['dec_bounds'],
                'Fid_dict': self.proc_data_dict[qubit]['Fid_dict'],
                'qubit': qubit,
                'timestamp': self.timestamp
            }
            fig, ax = plt.subplots(figsize=(3,3), dpi=100)
            # fig.patch.set_alpha(0)
            self.axs_dict[f'Assignment_matrix_{qubit}'] = ax
            self.figs[f'Assignment_matrix_{qubit}'] = fig
            self.plot_dicts[f'Assignment_matrix_{qubit}'] = {
                'plotfn': assignment_matrix_plotfn,
                'ax_id': f'Assignment_matrix_{qubit}',
                'M': self.proc_data_dict[qubit]['Assignment_matrix'],
                'qubit': qubit,
                'timestamp': self.timestamp
            }
        # Multiplexed (two-qutrit) assignment matrix — one figure total.
        fig, ax = plt.subplots(figsize=(6,6), dpi=100)
        # fig.patch.set_alpha(0)
        self.axs_dict[f'Mux_assignment_matrix'] = ax
        self.figs[f'Mux_assignment_matrix'] = fig
        self.plot_dicts[f'Mux_assignment_matrix'] = {
            'plotfn': mux_assignment_matrix_plotfn,
            'ax_id': f'Mux_assignment_matrix',
            'M': self.proc_data_dict['Mux_assignment_matrix'],
            'Qubits': self.Qubits,
            'timestamp': self.timestamp
        }
        # Population / leakage summary figure.
        fig = plt.figure(figsize=(6,6.5))
        gs = fig.add_gridspec(7, 2)
        axs = [fig.add_subplot(gs[0:3,0]),
               fig.add_subplot(gs[0:3,1]),
               fig.add_subplot(gs[3:5,0]),
               fig.add_subplot(gs[3:5,1]),
               fig.add_subplot(gs[5:7,:])]
        # fig.patch.set_alpha(0)
        self.axs_dict['Population_plot'] = axs[0]
        self.figs['Population_plot'] = fig
        self.plot_dicts['Population_plot'] = {
            'plotfn': population_plotfn,
            'ax_id': 'Population_plot',
            'rounds': self.rounds,
            'combinations': self.combinations,
            'Pop_vec_ref' : self.proc_data_dict['Pop_vec_ref'],
            'Pop_vec_exp' : self.proc_data_dict['Pop_vec_exp'],
            'P_leak_ref' : self.proc_data_dict['P_leak_ref'],
            'P_leak_exp' : self.proc_data_dict['P_leak_exp'],
            'P_leak_ref_q0' : self.proc_data_dict[f'P_leak_ref_{self.Qubits[0]}'],
            'P_leak_exp_q0' : self.proc_data_dict[f'P_leak_exp_{self.Qubits[0]}'],
            'P_leak_ref_q1' : self.proc_data_dict[f'P_leak_ref_{self.Qubits[1]}'],
            'P_leak_exp_q1' : self.proc_data_dict[f'P_leak_exp_{self.Qubits[1]}'],
            'fit_ref' : self.proc_data_dict['fit_ref'],
            'fit_exp' : self.proc_data_dict['fit_exp'],
            'fit_ref_q0' : self.proc_data_dict[f'fit_ref_{self.Qubits[0]}'],
            'fit_exp_q0' : self.proc_data_dict[f'fit_exp_{self.Qubits[0]}'],
            'fit_ref_q1' : self.proc_data_dict[f'fit_ref_{self.Qubits[1]}'],
            'fit_exp_q1' : self.proc_data_dict[f'fit_exp_{self.Qubits[1]}'],
            'Qubits': self.Qubits,
            'timestamp': self.timestamp
        }
    def run_post_extract(self):
        """
        Prepare and render all registered plots; optionally save figures
        according to ``options_dict`` flags.
        """
        self.prepare_plots()  # specify default plots
        self.plot(key_list='auto', axs_dict=self.axs_dict)  # make the plots
        if self.options_dict.get('save_figs', False):
            self.save_figures(
                close_figs=self.options_dict.get('close_figs', True),
                tag_tstamp=self.options_dict.get('tag_tstamp', True))
def ssro_IQ_projection_plotfn(
    shots_0,
    shots_1,
    shots_2,
    projection_01,
    projection_12,
    projection_02,
    classifier,
    dec_bounds,
    Fid_dict,
    timestamp,
    qubit,
    ax, **kw):
    """
    Plot the qutrit IQ scatter with fitted 2D Gaussians, classifier
    zones/decision boundaries, and the three 1D projections (01, 12, 02)
    with their double-Gaussian fits and fidelity annotations.
    """
    fig = ax.get_figure()
    axs = fig.get_axes()
    # Fit 2D gaussians
    from scipy.optimize import curve_fit
    def twoD_Gaussian(data, amplitude, x0, y0, sigma_x, sigma_y, theta):
        # Rotated 2D Gaussian, flattened for curve_fit.
        x, y = data
        x0 = float(x0)
        y0 = float(y0)
        a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2)
        b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
        c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2)
        g = amplitude*np.exp( - (a*((x-x0)**2) + 2*b*(x-x0)*(y-y0)
                                 + c*((y-y0)**2)))
        return g.ravel()
    def _fit_2D_gaussian(X, Y):
        # Histogram the shots and fit the rotated 2D Gaussian to the counts.
        counts, _x, _y = np.histogram2d(X, Y, bins=[100, 100], density=True)
        x = (_x[:-1] + _x[1:]) / 2
        y = (_y[:-1] + _y[1:]) / 2
        _x, _y = np.meshgrid(_x, _y)
        x, y = np.meshgrid(x, y)
        p0 = [counts.max(), np.mean(X), np.mean(Y), np.std(X), np.std(Y), 0]
        popt, pcov = curve_fit(twoD_Gaussian, (x, y), counts.T.ravel(), p0=p0)
        return popt
    popt_0 = _fit_2D_gaussian(shots_0[:,0], shots_0[:,1])
    popt_1 = _fit_2D_gaussian(shots_1[:,0], shots_1[:,1])
    popt_2 = _fit_2D_gaussian(shots_2[:,0], shots_2[:,1])
    # Plot stuff
    axs[0].plot(shots_0[:,0], shots_0[:,1], '.', color='C0', alpha=0.05)
    axs[0].plot(shots_1[:,0], shots_1[:,1], '.', color='C3', alpha=0.05)
    axs[0].plot(shots_2[:,0], shots_2[:,1], '.', color='C2', alpha=0.05)
    axs[0].plot([0, popt_0[1]], [0, popt_0[2]], '--', color='k', lw=.5)
    axs[0].plot([0, popt_1[1]], [0, popt_1[2]], '--', color='k', lw=.5)
    axs[0].plot([0, popt_2[1]], [0, popt_2[2]], '--', color='k', lw=.5)
    axs[0].plot(popt_0[1], popt_0[2], '.', color='C0', label='ground')
    axs[0].plot(popt_1[1], popt_1[2], '.', color='C3', label='excited')
    axs[0].plot(popt_2[1], popt_2[2], '.', color='C2', label='$2^\mathrm{nd}$ excited')
    axs[0].plot(popt_0[1], popt_0[2], 'x', color='white')
    axs[0].plot(popt_1[1], popt_1[2], 'x', color='white')
    axs[0].plot(popt_2[1], popt_2[2], 'x', color='white')
    # Draw 4sigma ellipse around mean
    from matplotlib.patches import Ellipse
    circle_0 = Ellipse((popt_0[1], popt_0[2]),
                       width=4*popt_0[3], height=4*popt_0[4],
                       angle=-popt_0[5]*180/np.pi,
                       ec='white', fc='none', ls='--', lw=1.25, zorder=10)
    axs[0].add_patch(circle_0)
    circle_1 = Ellipse((popt_1[1], popt_1[2]),
                       width=4*popt_1[3], height=4*popt_1[4],
                       angle=-popt_1[5]*180/np.pi,
                       ec='white', fc='none', ls='--', lw=1.25, zorder=10)
    axs[0].add_patch(circle_1)
    circle_2 = Ellipse((popt_2[1], popt_2[2]),
                       width=4*popt_2[3], height=4*popt_2[4],
                       angle=-popt_2[5]*180/np.pi,
                       ec='white', fc='none', ls='--', lw=1.25, zorder=10)
    axs[0].add_patch(circle_2)
    # Plot classifier zones
    from matplotlib.patches import Polygon
    _all_shots = np.concatenate((shots_0, shots_1))
    _lim = np.max([ np.max(np.abs(_all_shots[:,0]))*1.1, np.max(np.abs(_all_shots[:,1]))*1.1 ])
    Lim_points = {}
    for bound in ['01', '12', '02']:
        dec_bounds['mean']  # no-op expression kept from original
        _x0, _y0 = dec_bounds['mean']
        _x1, _y1 = dec_bounds[bound]
        # Extend the boundary from its mean point out to a far limit point.
        a = (_y1-_y0)/(_x1-_x0)
        b = _y0 - a*_x0
        _xlim = 1e2*np.sign(_x1-_x0)
        _ylim = a*_xlim + b
        Lim_points[bound] = _xlim, _ylim
    # Plot 0 area
    _points = [dec_bounds['mean'], Lim_points['01'], Lim_points['02']]
    _patch = Polygon(_points, color='C0', alpha=0.2, lw=0)
    axs[0].add_patch(_patch)
    # Plot 1 area
    _points = [dec_bounds['mean'], Lim_points['01'], Lim_points['12']]
    _patch = Polygon(_points, color='C3', alpha=0.2, lw=0)
    axs[0].add_patch(_patch)
    # Plot 2 area
    _points = [dec_bounds['mean'], Lim_points['02'], Lim_points['12']]
    _patch = Polygon(_points, color='C2', alpha=0.2, lw=0)
    axs[0].add_patch(_patch)
    # Plot decision boundary
    for bound in ['01', '12', '02']:
        _x0, _y0 = dec_bounds['mean']
        _x1, _y1 = Lim_points[bound]
        axs[0].plot([_x0, _x1], [_y0, _y1], 'k--', lw=1)
    axs[0].set_xlim(-_lim, _lim)
    axs[0].set_ylim(-_lim, _lim)
    axs[0].legend(frameon=False)
    axs[0].set_xlabel('Integrated voltage I')
    axs[0].set_ylabel('Integrated voltage Q')
    axs[0].set_title(f'IQ plot qubit {qubit}')
    fig.suptitle(f'{timestamp}\n')
    ##########################
    # Plot projections
    ##########################
    # 01 projection
    _bin_c = projection_01['bin_centers']
    bin_width = _bin_c[1]-_bin_c[0]
    axs[1].bar(_bin_c, projection_01['h0'], bin_width, fc='C0', alpha=0.4)
    axs[1].bar(_bin_c, projection_01['h1'], bin_width, fc='C3', alpha=0.4)
    axs[1].plot(_bin_c, double_gauss(_bin_c, *projection_01['popt0']), '-C0')
    axs[1].plot(_bin_c, double_gauss(_bin_c, *projection_01['popt1']), '-C3')
    axs[1].axvline(projection_01['threshold'], ls='--', color='k', lw=1)
    text = '\n'.join((f'Fid. : {projection_01["Fid"]*100:.1f}%',
                      f'SNR : {projection_01["SNR"]:.1f}'))
    props = dict(boxstyle='round', facecolor='gray', alpha=0)
    axs[1].text(.775, .9, text, transform=axs[1].transAxes,
                verticalalignment='top', bbox=props, fontsize=7)
    axs[1].text(projection_01['popt0'][0], projection_01['popt0'][4]/2,
                r'$|g\rangle$', ha='center', va='center', color='C0')
    axs[1].text(projection_01['popt1'][0], projection_01['popt1'][4]/2,
                r'$|e\rangle$', ha='center', va='center', color='C3')
    axs[1].set_xticklabels([])
    axs[1].set_xlim(_bin_c[0], _bin_c[-1])
    axs[1].set_ylim(bottom=0)
    axs[1].set_title('Projection of data')
    # 12 projection
    _bin_c = projection_12['bin_centers']
    bin_width = _bin_c[1]-_bin_c[0]
    axs[2].bar(_bin_c, projection_12['h1'], bin_width, fc='C3', alpha=0.4)
    axs[2].bar(_bin_c, projection_12['h2'], bin_width, fc='C2', alpha=0.4)
    axs[2].plot(_bin_c, double_gauss(_bin_c, *projection_12['popt1']), '-C3')
    axs[2].plot(_bin_c, double_gauss(_bin_c, *projection_12['popt2']), '-C2')
    axs[2].axvline(projection_12['threshold'], ls='--', color='k', lw=1)
    text = '\n'.join((f'Fid. : {projection_12["Fid"]*100:.1f}%',
                      f'SNR : {projection_12["SNR"]:.1f}'))
    props = dict(boxstyle='round', facecolor='gray', alpha=0)
    axs[2].text(.775, .9, text, transform=axs[2].transAxes,
                verticalalignment='top', bbox=props, fontsize=7)
    axs[2].text(projection_12['popt1'][0], projection_12['popt1'][4]/2,
                r'$|e\rangle$', ha='center', va='center', color='C3')
    axs[2].text(projection_12['popt2'][0], projection_12['popt2'][4]/2,
                r'$|f\rangle$', ha='center', va='center', color='C2')
    axs[2].set_xticklabels([])
    axs[2].set_xlim(_bin_c[0], _bin_c[-1])
    axs[2].set_ylim(bottom=0)
    # 02 projection
    _bin_c = projection_02['bin_centers']
    bin_width = _bin_c[1]-_bin_c[0]
    axs[3].bar(_bin_c, projection_02['h0'], bin_width, fc='C0', alpha=0.4)
    axs[3].bar(_bin_c, projection_02['h2'], bin_width, fc='C2', alpha=0.4)
    axs[3].plot(_bin_c, double_gauss(_bin_c, *projection_02['popt0']), '-C0')
    axs[3].plot(_bin_c, double_gauss(_bin_c, *projection_02['popt2']), '-C2')
    axs[3].axvline(projection_02['threshold'], ls='--', color='k', lw=1)
    text = '\n'.join((f'Fid. : {projection_02["Fid"]*100:.1f}%',
                      f'SNR : {projection_02["SNR"]:.1f}'))
    props = dict(boxstyle='round', facecolor='gray', alpha=0)
    axs[3].text(.775, .9, text, transform=axs[3].transAxes,
                verticalalignment='top', bbox=props, fontsize=7)
    axs[3].text(projection_02['popt0'][0], projection_02['popt0'][4]/2,
                r'$|g\rangle$', ha='center', va='center', color='C0')
    axs[3].text(projection_02['popt2'][0], projection_02['popt2'][4]/2,
                r'$|f\rangle$', ha='center', va='center', color='C2')
    axs[3].set_xticklabels([])
    axs[3].set_xlim(_bin_c[0], _bin_c[-1])
    axs[3].set_ylim(bottom=0)
    axs[3].set_xlabel('Integrated voltage')
    # Write fidelity textbox
    text = '\n'.join(('Assignment fidelity:',
                      f'$F_g$ : {Fid_dict["0"]*100:.1f}%',
                      f'$F_e$ : {Fid_dict["1"]*100:.1f}%',
                      f'$F_f$ : {Fid_dict["2"]*100:.1f}%',
                      f'$F_\mathrm{"{avg}"}$ : {Fid_dict["avg"]*100:.1f}%'))
    props = dict(boxstyle='round', facecolor='gray', alpha=.2)
    axs[1].text(1.05, 1, text, transform=axs[1].transAxes,
                verticalalignment='top', bbox=props)
def assignment_matrix_plotfn(
    M,
    qubit,
    timestamp,
    ax, **kw):
    """
    Render a single-qubit 3x3 qutrit assignment-probability matrix as a
    heatmap with per-cell probability labels and a colorbar.
    """
    fig = ax.get_figure()
    im = ax.imshow(M, cmap=plt.cm.Reds, vmin=0, vmax=1)

    for i in range(M.shape[0]):
        for j in range(M.shape[1]):
            c = M[j,i]
            # White text on dark (high-probability) cells for contrast.
            if abs(c) > .5:
                ax.text(i, j, '{:.2f}'.format(c), va='center', ha='center',
                        color = 'white')
            else:
                ax.text(i, j, '{:.2f}'.format(c), va='center', ha='center')
    ax.set_xticks([0,1,2])
    ax.set_xticklabels([r'$|0\rangle$',r'$|1\rangle$',r'$|2\rangle$'])
    ax.set_xlabel('Assigned state')
    ax.set_yticks([0,1,2])
    ax.set_yticklabels([r'$|0\rangle$',r'$|1\rangle$',r'$|2\rangle$'])
    ax.set_ylabel('Prepared state')
    ax.set_title(f'{timestamp}\nQutrit assignment matrix qubit {qubit}')
    cbar_ax = fig.add_axes([.95, .15, .03, .7])
    cb = fig.colorbar(im, cax=cbar_ax)
    cb.set_label('assignment probability')
def mux_assignment_matrix_plotfn(
    M,
    Qubits,
    timestamp,
    ax, **kw):
    """
    Render the multiplexed two-qutrit 9x9 assignment-probability matrix as
    a heatmap; cells below 0.01 are left unlabeled to reduce clutter.
    """
    fig = ax.get_figure()

    states = ['0','1', '2']
    combinations = [''.join(s) for s in itertools.product(states, repeat=2)]
    im = ax.imshow(M, cmap='Reds', vmin=0, vmax=1)
    for i in range(9):
        for j in range(9):
            c = M[j,i]
            if abs(c) > .5:
                ax.text(i, j, '{:.2f}'.format(c), va='center', ha='center',
                        color = 'white', size=10)
            elif abs(c)>.01:
                ax.text(i, j, '{:.2f}'.format(c), va='center', ha='center',
                        size=8)
    ax.set_xticks(np.arange(9))
    ax.set_yticks(np.arange(9))
    ax.set_xticklabels([f'${c0}_\mathrm{{{Qubits[0]}}}{c1}_\mathrm{{{Qubits[1]}}}$' for c0, c1 in combinations], size=8)
    ax.set_yticklabels([f'${c0}_\mathrm{{{Qubits[0]}}}{c1}_\mathrm{{{Qubits[1]}}}$' for c0, c1 in combinations], size=8)
    ax.set_xlabel('Assigned state')
    ax.set_ylabel('Input state')
    cb = fig.colorbar(im, orientation='vertical', aspect=35)
    # Manually shift the colorbar axes next to the matrix.
    pos = ax.get_position()
    pos = [ pos.x0+.65, pos.y0, pos.width, pos.height ]
    fig.axes[-1].set_position(pos)
    cb.set_label('Assignment probability', rotation=-90, labelpad=15)
    ax.set_title(f'{timestamp}\nMultiplexed qutrit assignment matrix {" ".join(Qubits)}')
def population_plotfn(
    rounds,
    combinations,
    Pop_vec_ref,
    Pop_vec_exp,
    P_leak_ref,
    P_leak_exp,
    P_leak_ref_q0,
    P_leak_exp_q0,
    P_leak_ref_q1,
    P_leak_exp_q1,
    fit_ref,
    fit_exp,
    fit_ref_q0,
    fit_exp_q0,
    fit_ref_q1,
    fit_exp_q1,
    Qubits,
    timestamp,
    ax, **kw):
    """
    Plot per-round population vectors (reference vs gate), per-qubit and
    two-qubit leakage curves with their leakage/seepage fits, and annotate
    the extracted CZ leakage rates with propagated uncertainties.
    """
    fig = ax.get_figure()
    axs = fig.get_axes()

    Rounds = np.arange(rounds)
    # Fixed color per two-qutrit outcome string.
    color = {'00' : '#bbdefb',
             '01' : '#64b5f6',
             '10' : '#1e88e5',
             '11' : '#0d47a1',
             '02' : '#003300',
             '20' : '#1b5e20',
             '12' : '#4c8c4a',
             '21' : '#81c784',
             '22' : '#b2fab4'}
    # Plot probabilities
    for i, comb in enumerate(combinations):
        label = f'${comb[0]}_\mathrm{{{Qubits[0]}}}{comb[1]}_\mathrm{{{Qubits[1]}}}$'
        axs[0].plot(Rounds, Pop_vec_ref[:,i], color=color[comb], label=label)
        axs[1].plot(Rounds, Pop_vec_exp[:,i], color=color[comb], label=label)
    # Plot qubit leakage probability
    # Leakage accumulation model (L = leakage, S = seepage per round).
    def func(n, L, S):
        return (1-np.exp(-n*(L+S)))*L/(L+S)
    axs[2].plot(Rounds, func(Rounds, *fit_ref_q0[0]), '--k')
    axs[2].plot(Rounds, func(Rounds, *fit_exp_q0[0]), '--k')
    axs[2].plot(Rounds, P_leak_ref_q0, 'C8', label='Ref.')
    axs[2].plot(Rounds, P_leak_exp_q0, 'C4', label='Gate')
    axs[2].legend(frameon=False, loc=4)
    axs[2].text(.05, .8, Qubits[0], transform=axs[2].transAxes)
    axs[3].plot(Rounds, func(Rounds, *fit_ref_q1[0]), '--k')
    axs[3].plot(Rounds, func(Rounds, *fit_exp_q1[0]), '--k')
    axs[3].plot(Rounds, P_leak_ref_q1, 'C8', label='Ref.')
    axs[3].plot(Rounds, P_leak_exp_q1, 'C4', label='Gate')
    axs[3].legend(frameon=False, loc=4)
    axs[3].text(.05, .8, Qubits[1], transform=axs[3].transAxes)
    # Plot total leakage probability
    axs[4].plot(Rounds, func(Rounds, *fit_ref[0]), '--k')
    axs[4].plot(Rounds, func(Rounds, *fit_exp[0]), '--k')
    axs[4].plot(Rounds, P_leak_ref, 'C8', label='Ref.')
    axs[4].plot(Rounds, P_leak_exp, 'C4', label='Gate')
    # Set common yaxis
    _lim = (*axs[0].get_ylim(), *axs[1].get_ylim())
    axs[0].set_ylim(min(_lim), max(_lim))
    axs[1].set_ylim(min(_lim), max(_lim))
    _lim = (*axs[2].get_ylim(), *axs[3].get_ylim())
    axs[2].set_ylim(min(_lim), max(_lim))
    axs[3].set_ylim(min(_lim), max(_lim))
    axs[4].set_xlabel('Rounds')
    axs[0].set_ylabel('Population')
    axs[2].set_ylabel('Leak. population')
    axs[4].set_ylabel('Leak. population')
    axs[1].set_yticklabels([])
    axs[3].set_yticklabels([])
    axs[1].legend(frameon=False, bbox_to_anchor=(1.01, 1.1), loc=2)
    axs[4].legend(frameon=False)
    axs[0].set_title('Reference')
    axs[1].set_title('Gate')
    fig.suptitle(f'{timestamp}\nRepeated CZ experiment {" ".join(Qubits)}')
    fig.tight_layout()
    # Propagate fit uncertainties (quadrature sum of ref and gate errors).
    popt_ref_q0, pcov_ref_q0 = fit_ref_q0
    perr_ref_q0 = np.sqrt(np.diag(pcov_ref_q0))
    popt_exp_q0, pcov_exp_q0 = fit_exp_q0
    perr_exp_q0 = np.sqrt(np.diag(pcov_exp_q0))
    perr_CZ_q0 = np.sqrt(perr_ref_q0[0]**2+perr_exp_q0[0]**2)
    popt_ref_q1, pcov_ref_q1 = fit_ref_q1
    perr_ref_q1 = np.sqrt(np.diag(pcov_ref_q1))
    popt_exp_q1, pcov_exp_q1 = fit_exp_q1
    perr_exp_q1 = np.sqrt(np.diag(pcov_exp_q1))
    perr_CZ_q1 = np.sqrt(perr_ref_q1[0]**2+perr_exp_q1[0]**2)
    text_str = 'Qubit leakage\n'+\
               f'CZ $L_1^\\mathrm{{{Qubits[0]}}}$: ${(popt_exp_q0[0]-popt_ref_q0[0])*100:.2f}\\pm{perr_CZ_q0*100:.2f}$%\n'+\
               f'CZ $L_1^\\mathrm{{{Qubits[1]}}}$: ${(popt_exp_q1[0]-popt_ref_q1[0])*100:.2f}\\pm{perr_CZ_q1*100:.2f}$%'
    props = dict(boxstyle='round', facecolor='white', alpha=1)
    axs[3].text(1.06, 1, text_str, transform=axs[3].transAxes, fontsize=8.5,
                verticalalignment='top', bbox=props)
    popt_ref, pcov_ref = fit_ref
    perr_ref = np.sqrt(np.diag(pcov_ref))
    popt_exp, pcov_exp = fit_exp
    perr_exp = np.sqrt(np.diag(pcov_exp))
    perr_CZ = np.sqrt(perr_ref[0]**2+perr_exp[0]**2)
    text_str = 'Ref. curve\n'+\
               f'$L_1$: ${popt_ref[0]*100:.2f}\\pm{perr_ref[0]*100:.2f}$%\n'+\
               f'$L_2$: ${popt_ref[1]*100:.2f}\\pm{perr_ref[1]*100:.2f}$%\n'+\
               'Gate curve\n'+\
               f'$L_1$: ${popt_exp[0]*100:.2f}\\pm{perr_exp[0]*100:.2f}$%\n'+\
               f'$L_2$: ${popt_exp[1]*100:.2f}\\pm{perr_exp[1]*100:.2f}$%\n\n'+\
               f'CZ $L_1$: ${(popt_exp[0]-popt_ref[0])*100:.2f}\\pm{perr_CZ*100:.2f}$%'
    props = dict(boxstyle='round', facecolor='white', alpha=1)
    axs[4].text(1.03, 1, text_str, transform=axs[4].transAxes, fontsize=8.5,
                verticalalignment='top', bbox=props)
def SNZ(delta, tmid, tp, g, delta_0, det11_02, n_dist, B_amp):
    '''
    Function parametrizing the SNZ landscape.

    Args:
        delta : Detuning of high freq. qubit
        tp : duration of pulse.
        tmid : SNZ tmid parameter.
        n_dist : # of sampling points that model pulse distortion.
        B_amp : SNZ B amplitude parameter.
        g : coupling of avoided crossing.
        delta_0 : detuning at avoided crossing.
        det11_02 : detuning of 11-02 levels at sweetspot

    Returns:
        (pop20, pop11, cphase): |20> population, |11> population and the
        conditional phase in degrees (all modulo 360 where applicable).
    '''
    # Convert all frequencies to angular units and reference the detuning
    # to the avoided crossing.
    g_rad = g*2*np.pi
    det11_02_rad = det11_02*2*np.pi
    delta_rad = delta*2*np.pi
    delta_0_rad = delta_0*2*np.pi
    delta_rad -= delta_0_rad
    # Convert B_amp to frequency detuning
    B_det_rad = (1-B_amp)*det11_02_rad
    # Frequency of Chevron oscillation
    Omega = np.sqrt(delta_rad**2+(2*g_rad)**2)
    # Population of first Chevron oscillation
    # (amplitudes of |11> and |20> after the first half pulse, starting
    # from |11>).
    _term1 = -np.exp(+1j*Omega/2*tp)*((delta_rad+Omega)/(2*g_rad))*( -g_rad/Omega*1 )
    _term2 = -np.exp(-1j*Omega/2*tp)*((delta_rad-Omega)/(2*g_rad))*( +g_rad/Omega*1 )
    _term3 = np.exp(1j*Omega/2*tp)*( -g_rad/Omega*1 )
    _term4 = np.exp(-1j*Omega/2*tp)*( +g_rad/Omega*1 )
    c11 = _term1+_term2
    c20 = _term3+_term4
    # Population after evolving in B amp
    # tB is half a sampling period at 2.4 GSa/s.
    tB = .5/2.4e9
    c11 = c11*np.exp(1j*-B_det_rad/2*tB)
    c20 = c20*np.exp(1j*+B_det_rad/2*tB)
    # Population after evolving in the sweetspot
    # We account for pulse distortion using an offset in tmid
    t_mid_distorted = (tmid-n_dist/2.4e9)
    c11 = c11*np.exp(1j*-det11_02_rad/2*t_mid_distorted)
    c20 = c20*np.exp(1j*+det11_02_rad/2*t_mid_distorted)
    # Population after evolving in B amp
    tB = .5/2.4e9
    c11 = c11*np.exp(1j*-B_det_rad/2*tB)
    c20 = c20*np.exp(1j*+B_det_rad/2*tB)
    # Population after second Chevron
    _term1 = -np.exp(1j*Omega/2*tp)*((delta_rad+Omega)/(2*g_rad))*( c20/2*(1-delta_rad/Omega)-g_rad/Omega*c11 )
    _term2 = -np.exp(-1j*Omega/2*tp)*((delta_rad-Omega)/(2*g_rad))*( c20/2*(1+delta_rad/Omega)+g_rad/Omega*c11 )
    _term3 = np.exp(1j*Omega/2*tp)*( c20/2*(1-delta_rad/Omega)-g_rad/Omega*c11 )
    _term4 = np.exp(-1j*Omega/2*tp)*( c20/2*(1+delta_rad/Omega)+g_rad/Omega*c11 )
    c11 = _term1+_term2
    c20 = _term3+_term4
    # Calculate state populations
    pop11 = np.abs(c11)**2
    pop20 = np.abs(c20)**2
    # Calculate conditional phase
    # (single-qubit phase contributions are subtracted from the |11> phase).
    phase11 = np.angle(c11)
    phase20 = np.angle(c20)
    cphase = np.angle(c11) - delta_rad*tp + det11_02_rad*t_mid_distorted/2 + B_det_rad*tB
    cphase *= -1
    phase11 = np.mod(phase11*180/np.pi, 360)
    phase20 = np.mod(phase20*180/np.pi, 360)
    cphase = np.mod(cphase*180/np.pi, 360)
    return pop20, pop11, cphase
def get_optimal_SNZ_params(xy, pop20, cphase):
    """
    Pick the (detuning, tmid) grid point of an SNZ landscape that minimizes
    a combined conditional-phase / leakage cost.

    Args:
        xy: tuple ``(det, tmid)`` of equally-shaped 2D arrays spanning
            the landscape grid.
        pop20: 2D array of |20> population at each grid point.
        cphase: 2D array of conditional phase (deg) at each grid point.

    Returns:
        Tuple ``(optimal detuning, optimal tmid * 2.4e9)`` — tmid is
        rescaled by 2.4e9 (presumably the AWG sampling rate — confirm).
    """
    det_grid, tmid_grid = xy
    assert det_grid.shape == tmid_grid.shape
    assert (det_grid.shape == pop20.shape) and (det_grid.shape == cphase.shape)
    # Cost terms: squared distance of cphase from 180 deg, pointwise
    # leakage, and the column-averaged leakage (broadcast over rows).
    phase_cost = ((cphase - 180) / 180) ** 2
    leak_cost = pop20 ** 2
    avg_leak_cost = np.mean(pop20, axis=0) ** 2
    total_cost = phase_cost + leak_cost + avg_leak_cost
    # Locate the global minimum of the cost landscape.
    flat_idx = np.argmin(total_cost)
    row, col = np.unravel_index(flat_idx, total_cost.shape)
    return det_grid[row, col], tmid_grid[row, col] * 2.4e9
+ Args: + XY : (Dets, Tmids) Tuple of 1D arrays with + detunings (Hz) and Tmids (# sampling points) + of landscape. + fit_params: output SNZ fit params. + ''' + # Sort fit parameters + tp_factor, g, delta_0, det11_02, n_dist, _a, _b = fit_params + tp = tp_factor/(4*g) + # Get interpolated landscape limits + Det, Tmid = XY + X = np.linspace(Det[0], Det[-1], 201) + Y = np.linspace(Tmid[0], Tmid[-1], 201)/2.4e9 + # Number of suitable 180 cphase contour levels (+1) + n_contours = int((Y[-1]-n_dist/2.4e9)*det11_02) + # Calculate optimal parameters for each contour section + Opt_params = [] + for i in range(n_contours+2): + # Calculate interpolated SNZ landscape for contour section + X_range = X*1 + Y_range = np.linspace((i-.5)/det11_02+n_dist/2.4e9, + (i+.5)/det11_02+n_dist/2.4e9, 201) + _x, _y = np.meshgrid(X_range, Y_range) + z0, z1, z2 = SNZ(delta=_x, tmid=_y, g=g, tp=tp, B_amp=0.5, + delta_0=delta_0, det11_02=det11_02, n_dist=n_dist) + # Compute optimal det and tmid for contour section + opt_params = get_optimal_SNZ_params((_x, _y), z0, z2) + # If value is within range + if (opt_params[1]>Tmid[0]) and (opt_params[1]1: + self.figs[f'VCZ_landscape_{self.Q0}_{self.Q1}'] = plt.figure(figsize=(9,4*n), dpi=100) + # self.figs[f'VCZ_landscape_{self.Q0}_{self.Q1}'].patch.set_alpha(0) + axs = [] + for i, q0 in enumerate(self.Q0): + axs.append(self.figs[f'VCZ_landscape_{self.Q0}_{self.Q1}'].add_subplot(n,2,2*i+1)) + axs.append(self.figs[f'VCZ_landscape_{self.Q0}_{self.Q1}'].add_subplot(n,2,2*i+2)) + + self.axs_dict[f'plot_{i}'] = axs[0] + + self.plot_dicts[f'VCZ_landscape_{self.Q0}_{self.Q1}_{i}']={ + 'plotfn': VCZ_Tmid_landscape_plotfn, + 'ax_id': f'plot_{i}', + 'Amps' : self.proc_data_dict['Amps'][i], + 'Tmid' : self.proc_data_dict['Tmid'], + 'CP' : self.proc_data_dict[f'CP_{i}'], + 'MF' : self.proc_data_dict[f'MF_{i}'], + 'q0' : self.Q0[i], 'q1' : self.Q1[i], + 'ts' : self.timestamp, 'n': i, + 'title' : f'Qubits {" ".join(self.Q0)}, {" ".join(self.Q1)}', + } + + for i, q0 
in enumerate(self.Q0): + self.figs[f'VCZ_landscape_{q0}_{self.Q1[i]}'] = plt.figure(figsize=(8,4.25), dpi=100) + # self.figs[f'VCZ_landscape_{q0}_{self.Q1[i]}'].patch.set_alpha(0) + axs = [self.figs[f'VCZ_landscape_{q0}_{self.Q1[i]}'].add_subplot(121), + self.figs[f'VCZ_landscape_{q0}_{self.Q1[i]}'].add_subplot(122)] + + self.axs_dict[f'conditional_phase_{i}'] = axs[0] + self.axs_dict[f'missing_fraction_{i}'] = axs[0] + + self.plot_dicts[f'VCZ_landscape_{self.Q0[i]}_{self.Q1[i]}']={ + 'plotfn': VCZ_Tmid_landscape_plotfn, + 'ax_id': f'conditional_phase_{i}', + 'Amps' : self.proc_data_dict['Amps'][i], + 'Tmid' : self.proc_data_dict['Tmid'], + 'CP' : self.proc_data_dict[f'CP_{i}'], + 'MF' : self.proc_data_dict[f'MF_{i}'], + 'q0' : self.Q0[i], 'q1' : self.Q1[i], + 'ts' : self.timestamp, + 'q0_freq' : self.Q0_freq, + 'Dets' : self.proc_data_dict['Detunings'][i] if self.Poly_coefs\ + else None, + 'fit_params' : self.proc_data_dict[f'Fit_params_{i}'] if self.Poly_coefs\ + else None, + 'Opt_params' : self.proc_data_dict[f'Opt_params_{i}'] if self.Poly_coefs\ + else None, + } + + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', False): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + tag_tstamp=self.options_dict.get('tag_tstamp', True)) + +def find_contours(Array, value=180): + ''' + array: 2D array on which to search + value: values of the contours desired + ''' + # Find points close to value + _points = [] + for i in range(Array.shape[0]): + idxs = np.where(np.abs(Array[i,:]-value)<0.99) + for j in idxs[0]: + _points.append([i,j]) + # Sort points in different contours + _contours = [[_points[0]]] + for point in _points[1:]: + p_distance = Array.shape[0] + for contour in _contours: + for p in contour: + _distance = np.sqrt( np.sum( (np.array(point) - np.array(p))**2 ) ) + p_distance = min(_distance, _distance) 
+ if p_distance < 10: + contour.append(point) + break + if p_distance < 10: + pass + else: + _contours.append([point]) + return _contours + +def VCZ_Tmid_landscape_plotfn( + ax, + Amps, Tmid, + CP, MF, + q0, q1, + ts, n=0, + Dets=None, + q0_freq=None, + fit_params=None, + Opt_params=None, + title=None, **kw): + + fig = ax.get_figure() + axs = fig.get_axes() + # Plot leakage and conditional phase landscapes + def get_plot_axis(vals, rang=None): + dx = vals[1]-vals[0] + X = np.concatenate((vals, [vals[-1]+dx])) - dx/2 + if rang: + X = X/np.max(vals) * (rang[1]-rang[0]) + rang[0] + return X + # Plot versus transmon detuning + if type(Dets) != type(None): + X = get_plot_axis(Dets) + # Plot versus gain + else: + X = get_plot_axis(Amps) + Y = get_plot_axis(Tmid) + a1 = axs[0+2*n].pcolormesh(X, Y, CP, cmap=hsluv_anglemap45, vmin=0, vmax=360) + fig.colorbar(a1, ax=axs[0+2*n], label='conditional phase', ticks=[0, 90, 180, 270, 360]) + a2 = axs[1+2*n].pcolormesh(X, Y, MF, cmap='hot') + fig.colorbar(a2, ax=axs[1+2*n], label='missing fraction') + # Set axis labels + for i in range(2): + axs[i+2*n].set_xlabel('Amplitude') + axs[i+2*n].set_ylabel(r'$\tau_\mathrm{mid}$ (#)') + if type(Dets) != type(None): + set_xlabel(axs[i+2*n], f'{q0} detuning', unit='Hz') + axs[0+2*n].set_title(f'Conditional phase') + axs[1+2*n].set_title(f'Missing fraction') + # Set figure title + if title: + fig.suptitle(title+'\n'+ts, y=1.01) + axs[0+2*n].set_title(f'Conditional phase {q0} {q1}') + axs[1+2*n].set_title(f'Missing fraction {q0} {q1}') + else: + fig.suptitle(ts+f'\nQubits {q0} {q1}', fontsize=14, y=.9) + axs[0].set_title(f'Conditional phase') + axs[1].set_title(f'Missing fraction') + # Add qubit frequency axis and SNZ leakage fit contours + if type(Dets) != type(None): + # Add qubit frequency axis + if q0_freq: + axt0 = axs[0+2*n].twiny() + axt0.set_xlim((q0_freq-np.array(axs[0+2*n].get_xlim()))*1e-9) + axt0.set_xlabel(f'{q0} Frequency (GHz)') + axt1 = axs[1+2*n].twiny() + 
axt1.set_xlim((q0_freq-np.array(axs[1+2*n].get_xlim()))*1e-9) + axt1.set_xlabel(f'{q0} Frequency (GHz)') + # Plot SNZ leakage fitting contours + _x = np.linspace(X[0], X[-1], 201) + _y = np.linspace(Y[0], Y[-1], 201) + _X, _Y = np.meshgrid(_x, _y) + # Get interpolated landscape from fit + # fit params + tp_factor, g, delta_0, det11_02, n_dist, a, b = fit_params + Pop20, Pop11, Cphase = SNZ(_X, _Y/2.4e9, + tp = tp_factor/(4*g), + g = g, + delta_0 = delta_0, + det11_02 = det11_02, + n_dist = n_dist, + B_amp=0.5 + ) + for i in range(2): + # Plot leakage contours + for c, a_ in zip([.2, .6, .8], [.7, .85, 1]): + axs[i+2*n].contour(_X, _Y, Pop20, [c], colors=['w'], + linewidths=[1], linestyles=['--'], alpha=a_) + # Plot 180 cphase contours + # CS = axs[i+2*n].contour(_X, _Y, Cphase, [180], colors=['w'], + # linewidths=[1.5], linestyles=['--'], alpha=1) + # axs[i+2*n].clabel(CS, CS.levels, inline=True, fmt='$%i^\\circ$', fontsize=10) + Contours_180 = find_contours(Cphase, value=180) + for c, contour in enumerate(Contours_180): + axs[i+2*n].plot(_x[np.array(contour)[:,1]], _y[np.array(contour)[:,0]], f'w--', lw=2) + # Plot optimal parameters + for opt_det, opt_tmid in Opt_params[:-1]: + axs[i+2*n].plot([opt_det], [opt_tmid], '*', markersize=12, + markerfacecolor='cornflowerblue', markeredgewidth=1, + markeredgecolor='w', zorder=100) + for opt_det, opt_tmid in Opt_params[-1:]: + axs[i+2*n].plot([opt_det], [opt_tmid], '*', markersize=12, + markerfacecolor='blue', markeredgewidth=1, + markeredgecolor='w', zorder=100) + else: + # Plot 180 phase contours + def get_contours(cphase, phase): + n = len(cphase) + x = [] + y = np.arange(n) + for i in range(n): + x.append(np.argmin(abs(cphase[i]-phase))) + dx = np.array(x)[1:]-np.array(x)[:-1] + k = 0 + contours = {'0': {'x':[x[0]], 'y':[0]}} + for i, s in enumerate(dx): + if s > 0: + contours[f'{k}']['x'].append(x[i+1]) + contours[f'{k}']['y'].append(i+1) + else: + k += 1 + contours[f'{k}'] = {'x':[x[i+1]], 'y':[i+1]} + return 
contours + CT = get_contours(CP, phase=180) + for c in CT.values(): + if type(Dets) != type(None): + c['x'] = Dets[c['x']] + else: + c['x'] = Amps[c['x']] + c['y'] = Tmid[c['y']] + axs[1+2*n].plot(c['x'], c['y'], marker='', ls='--', color='white') + fig.tight_layout() + + +def SNZ2(delta, tmid, tp, g, delta_0, det11_02, n_dist, B_amp): + ''' + Function parametrizing the SNZ landscape. + Args: + delta : Detuning of high freq. qubit + tp : duration of pulse + tmid : SNZ tmid parameter + g : coupling of avoided crossing + delta_0 : detuning at avoided crossing. + det11_02 : detuning of 11-02 levels at sweetspot + ''' + g_rad = g*2*np.pi + det11_02_rad = det11_02*2*np.pi + delta_rad = delta*2*np.pi + delta_0_rad = delta_0*2*np.pi + delta_rad -= delta_0_rad + # Convert B_amp to frequency detuning + B_det_rad = (1-B_amp)*det11_02_rad + # Frequency of Chevron oscillation + Omega = np.sqrt(delta_rad**2+(2*g_rad)**2) + # Population of first Chevron oscillation + _term1 = -np.exp(+1j*Omega/2*tp)*((delta_rad+Omega)/(2*g_rad))*( -g_rad/Omega*1 ) + _term2 = -np.exp(-1j*Omega/2*tp)*((delta_rad-Omega)/(2*g_rad))*( +g_rad/Omega*1 ) + _term3 = np.exp(1j*Omega/2*tp)*( -g_rad/Omega*1 ) + _term4 = np.exp(-1j*Omega/2*tp)*( +g_rad/Omega*1 ) + c11 = _term1+_term2 + c20 = _term3+_term4 + # Population after evolving in B amp + tB = 1/2.4e9 + c11 = c11*np.exp(1j*-B_det_rad/2*tB) + c20 = c20*np.exp(1j*+B_det_rad/2*tB) + # Population after evolving in the sweetspot + # We account for pulse distortion using an offset in tmid + t_mid_distorted = (tmid - n_dist/2.4e9) + c11 = c11*np.exp(1j*-det11_02_rad/2*t_mid_distorted) + c20 = c20*np.exp(1j*+det11_02_rad/2*t_mid_distorted) + # Population after evolving in B amp + tB = 1/2.4e9 + c11 = c11*np.exp(1j*-B_det_rad/2*tB) + c20 = c20*np.exp(1j*+B_det_rad/2*tB) + # Population after second Chevron + _term1 = -np.exp(1j*Omega/2*tp)*((delta_rad+Omega)/(2*g_rad))*( c20/2*(1-delta_rad/Omega)-g_rad/Omega*c11 ) + _term2 = 
-np.exp(-1j*Omega/2*tp)*((delta_rad-Omega)/(2*g_rad))*( c20/2*(1+delta_rad/Omega)+g_rad/Omega*c11 ) + _term3 = np.exp(1j*Omega/2*tp)*( c20/2*(1-delta_rad/Omega)-g_rad/Omega*c11 ) + _term4 = np.exp(-1j*Omega/2*tp)*( c20/2*(1+delta_rad/Omega)+g_rad/Omega*c11 ) + c11 = _term1+_term2 + c20 = _term3+_term4 + # Calculate state populations + pop11 = np.abs(c11)**2 + pop20 = np.abs(c20)**2 + # Calculate conditional phase + phase11 = np.angle(c11) + phase20 = np.angle(c20) + cphase = np.angle(c11) - delta_rad*tp + det11_02_rad*t_mid_distorted/2 + B_det_rad*tB + cphase *= -1 + phase11 = np.mod(phase11*180/np.pi, 360) + phase20 = np.mod(phase20*180/np.pi, 360) + cphase = np.mod(cphase*180/np.pi, 360) + return pop20, pop11, cphase + +class VCZ_B_Analysis(ba.BaseDataAnalysis): + """ + Analysis + """ + def __init__(self, + Q0, + Q1, + A_ranges, + directions, + Poly_coefs: list = None, + Out_range: float = 5, + DAC_amp: float = 0.5, + tmid: float = None, + Q0_freq:float = None, + Q_parks: str = None, + l1_coef: float = 1, + t_start: str = None, + t_stop: str = None, + label: str = '', + options_dict: dict = None, + extract_only: bool = False, + auto=True, + asymmetry: float = 0): + + super().__init__(t_start=t_start, + t_stop=t_stop, + label=label, + options_dict=options_dict, + extract_only=extract_only) + self.Q0 = Q0 + self.Q1 = Q1 + self.Q_parks = Q_parks + self.ranges = A_ranges + self.directions = directions + self.Poly_coefs = Poly_coefs + self.Out_range = Out_range + self.DAC_amp = DAC_amp + self.Q0_freq = Q0_freq + self.tmid = tmid + self.asymmetry = asymmetry + self.l1_coef = l1_coef + if auto: + self.run_analysis() + + def extract_data(self): + self.get_timestamps() + self.timestamp = self.timestamps[0] + + data_fp = get_datafilepath_from_timestamp(self.timestamp) + param_spec = {'data': ('Experimental Data/Data', 'dset'), + 'value_names': ('Experimental Data', 'attr:value_names')} + self.raw_data_dict = h5d.extract_pars_from_datafile( + data_fp, param_spec) + # Parts 
added to be compatible with base analysis data requirements + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] + + def process_data(self): + self.proc_data_dict = {} + self.qoi = {} + Amps_idxs = np.unique(self.raw_data_dict['data'][:,0]) + Bamps = np.unique(self.raw_data_dict['data'][:,1]) + nx, ny = len(Amps_idxs), len(Bamps) + Amps_list = [ np.linspace(r[0], r[1], nx) for r in self.ranges ] + self.proc_data_dict['Amps'] = Amps_list + self.proc_data_dict['Bamps'] = Bamps + if self.Poly_coefs: + P_funcs = [ np.poly1d(coefs) for coefs in self.Poly_coefs ] + Detunings = [ P_funcs[i](Amps_list[i]*self.DAC_amp*self.Out_range/2*(1+self.asymmetry)) \ + for i in range(len(self.Q0)) ] + self.proc_data_dict['Detunings'] = Detunings + # Calculate cost function to find optimal + # parameters of amplitude and B amp + def cost_function(CP, MF, + phase=180, + cp_coef=1, l1_coef=1): + ''' + Cost function for minimizing cphase + error and leakage simultaneously. 
+ ''' + A = ((np.abs(CP)-180)/180)**2 + B = ((MF-np.min(MF))/.5)**2 + C = (np.mean(MF-np.min(MF), axis=0)/.5)**2 + return cp_coef*A + l1_coef*(B+C) + for i, q0 in enumerate(self.Q0): + CP = self.raw_data_dict['data'][:,2*i+2].reshape(ny, nx) + MF = self.raw_data_dict['data'][:,2*i+3].reshape(ny, nx) + CF = cost_function(CP, MF, l1_coef=self.l1_coef) + # Find minimum of cost function + idxs_min = np.unravel_index(np.argmin(CF), CF.shape) + A_min, B_min = Amps_list[i][idxs_min[1]], Bamps[idxs_min[0]] + CP_min, L1_min = CP[idxs_min], MF[idxs_min]/2 + if self.Poly_coefs: + Det_min = Detunings[i][idxs_min[1]] + self.qoi[f'Optimal_det_{q0}'] = Det_min + # Save quantities of interest + self.proc_data_dict[f'CP_{i}'] = CP + self.proc_data_dict[f'MF_{i}'] = MF + self.proc_data_dict[f'CF_{i}'] = CF + self.qoi[f'Optimal_amps_{q0}'] = A_min, B_min + self.qoi[f'Gate_perf_{q0}'] = CP_min, L1_min + # Fit SNZ landscapes using SNZ + # landscape parametrization + # if self.Poly_coefs: + # for i, q0 in enumerate(self.Q0): + # # Define fit function + # from scipy.optimize import curve_fit + # def fit_func(xy, tp_factor, tmid, g, delta_0, det11_02, n_dist, a, b): + # ''' + # Fit function helper for SNZ gate landscape. + # ''' + # delta, bamp = xy + # tp = tp_factor/(4*g) + # pop20, pop11, cphase = SNZ2(delta, tmid, tp, g, delta_0, det11_02, n_dist, B_amp=bamp) + # outcome = a*pop20 + b + # return outcome.ravel() + # # sort fit data + # _detunings = self.proc_data_dict['Detunings'][i] + # _Bamps = self.proc_data_dict['Bamps'] + # x, y = np.meshgrid(_detunings, _Bamps) + # # Multiply missing fraction by two to get population. 
+ # z = 2*self.proc_data_dict[f'MF_{i}'] + # # initial fit guess + # # tp_factor, tmid, g, delta_0, det_11_02, n_dist, a, b + # p0 = [ 1, self.tmid, 12e6, np.mean(Detunings), np.mean(Detunings)*1.1, .5, 1, 0] + # bounds = ((0.9, 0, 10e6, 0, 0, 0, 0.1, -.1), + # (1.1, 12/2.4e9, 13e6, np.inf, np.inf, 2, 1.1, .1)) + # popt, pcov = curve_fit(fit_func, (x,y), z.ravel(), p0=p0, bounds=bounds) + # self.proc_data_dict[f'Fit_params_{i}'] = popt + # self.qoi[f'tp_factor_{i}'] = popt[0] + + def prepare_plots(self): + self.axs_dict = {} + + n = len(self.Q0) + if n>1: + self.figs[f'VCZ_landscape_{self.Q0}_{self.Q1}'] = plt.figure(figsize=(15,4*n), dpi=100) + # self.figs[f'VCZ_landscape_{self.Q0}_{self.Q1}'].patch.set_alpha(0) + axs = [] + for i, q0 in enumerate(self.Q0): + axs.append(self.figs[f'VCZ_landscape_{self.Q0}_{self.Q1}'].add_subplot(n,3,3*i+1)) + axs.append(self.figs[f'VCZ_landscape_{self.Q0}_{self.Q1}'].add_subplot(n,3,3*i+2)) + axs.append(self.figs[f'VCZ_landscape_{self.Q0}_{self.Q1}'].add_subplot(n,3,3*i+3)) + + self.axs_dict[f'plot_{i}'] = axs[0] + + self.plot_dicts[f'VCZ_landscape_{self.Q0}_{self.Q1}_{i}']={ + 'plotfn': VCZ_B_landscape_plotfn, + 'ax_id': f'plot_{i}', + 'Amps' : self.proc_data_dict['Amps'][i], + 'Bamps' : self.proc_data_dict['Bamps'], + 'CP' : self.proc_data_dict[f'CP_{i}'], + 'MF' : self.proc_data_dict[f'MF_{i}'], + 'CF' : self.proc_data_dict[f'CF_{i}'], + 'q0' : self.Q0[i], 'q1' : self.Q1[i], + 'opt' : self.qoi[f'Optimal_amps_{q0}'], + 'ts' : self.timestamp, + 'n': i, + 'direction' : self.directions[i][0], + 'title' : f'Qubits {" ".join(self.Q0)}, {" ".join(self.Q1)}', + 'gate_perf' : self.qoi[f'Gate_perf_{q0}'] + } + + for i, q0 in enumerate(self.Q0): + if self.Poly_coefs: + fig = plt.figure(figsize=(13,4), dpi=100) + else: + fig = plt.figure(figsize=(15,4), dpi=100) + self.figs[f'VCZ_landscape_{q0}_{self.Q1[i]}'] = fig + # self.figs[f'VCZ_landscape_{q0}_{self.Q1[i]}'].patch.set_alpha(0) + axs = 
[self.figs[f'VCZ_landscape_{q0}_{self.Q1[i]}'].add_subplot(131), + self.figs[f'VCZ_landscape_{q0}_{self.Q1[i]}'].add_subplot(132), + self.figs[f'VCZ_landscape_{q0}_{self.Q1[i]}'].add_subplot(133)] + self.axs_dict[f'conditional_phase_{i}'] = axs[0] + + self.plot_dicts[f'VCZ_landscape_{self.Q0[i]}_{self.Q1[i]}']={ + 'plotfn': VCZ_B_landscape_plotfn, + 'ax_id': f'conditional_phase_{i}', + 'Amps' : self.proc_data_dict['Amps'][i], + 'Bamps' : self.proc_data_dict['Bamps'], + 'CP' : self.proc_data_dict[f'CP_{i}'], + 'MF' : self.proc_data_dict[f'MF_{i}'], + 'CF' : self.proc_data_dict[f'CF_{i}'], + 'q0' : self.Q0[i], 'q1' : self.Q1[i], + 'ts' : self.timestamp, + 'gate_perf' : self.qoi[f'Gate_perf_{q0}'], + 'direction' : self.directions[i][0], + 'q0_freq' : self.Q0_freq, + 'Dets' : self.proc_data_dict['Detunings'][i] if self.Poly_coefs\ + else None, + 'opt' : (self.qoi[f'Optimal_det_{q0}'], self.qoi[f'Optimal_amps_{q0}'][1])\ + if self.Poly_coefs else self.qoi[f'Optimal_amps_{q0}'], + # 'fit_params' : self.proc_data_dict[f'Fit_params_{i}'] if self.Poly_coefs\ + # else None, + } + + self.figs[f'VCZ_Leakage_contour_{q0}_{self.Q1[i]}'] = plt.figure(figsize=(9,4), dpi=100) + # self.figs[f'VCZ_Leakage_contour_{q0}_{self.Q1[i]}'].patch.set_alpha(0) + axs = [self.figs[f'VCZ_Leakage_contour_{q0}_{self.Q1[i]}'].add_subplot(121), + self.figs[f'VCZ_Leakage_contour_{q0}_{self.Q1[i]}'].add_subplot(122)] + self.axs_dict[f'contour_{i}'] = axs[0] + + self.plot_dicts[f'VCZ_Leakage_contour_{q0}_{self.Q1[i]}']={ + 'plotfn': VCZ_L1_contour_plotfn, + 'ax_id': f'contour_{i}', + 'Amps' : self.proc_data_dict['Amps'][i], + 'Bamps' : self.proc_data_dict['Bamps'], + 'CP' : self.proc_data_dict[f'CP_{i}'], + 'MF' : self.proc_data_dict[f'MF_{i}'], + 'CF' : self.proc_data_dict[f'CF_{i}'], + 'q0' : self.Q0[i], + 'q1' : self.Q1[i], + 'ts' : self.timestamp, + 'gate_perf' : self.qoi[f'Gate_perf_{q0}'], + 'direction' : self.directions[i][0], + 'q0_freq' : self.Q0_freq, + 'Dets' : 
self.proc_data_dict['Detunings'][i] if self.Poly_coefs\ + else None, + 'opt' : (self.qoi[f'Optimal_det_{q0}'], self.qoi[f'Optimal_amps_{q0}'][1])\ + if self.Poly_coefs else self.qoi[f'Optimal_amps_{q0}'], + } + + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', False): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + tag_tstamp=self.options_dict.get('tag_tstamp', True)) + +def VCZ_B_landscape_plotfn( + ax, + Amps, Bamps, + CP, MF, CF, + q0, q1, ts, + gate_perf, + opt, + Dets=None, + q0_freq=None, + direction=None, + fit_params=None, + n=0, title=None, **kw): + + fig = ax.get_figure() + axs = fig.get_axes() + # Plot leakage and conditional phase landscapes + def get_plot_axis(vals, rang=None): + dx = vals[1]-vals[0] + X = np.concatenate((vals, [vals[-1]+dx])) - dx/2 + if rang: + X = X/np.max(vals) * (rang[1]-rang[0]) + rang[0] + return X + # Plot versus transmon detuning + if type(Dets) != type(None): + X = get_plot_axis(Dets) + # Plot versus gain + else: + X = get_plot_axis(Amps) + Y = get_plot_axis(Bamps) + a1 = axs[0+3*n].pcolormesh(X, Y, CP, cmap=hsluv_anglemap45, vmin=0, vmax=360) + fig.colorbar(a1, ax=axs[0+3*n], label='conditional phase', ticks=[0, 90, 180, 270, 360]) + a2 = axs[1+3*n].pcolormesh(X, Y, MF, cmap='hot') + fig.colorbar(a2, ax=axs[1+3*n], label='missing fraction') + a3 = axs[2+3*n].pcolormesh(X, Y, CF, cmap='viridis', + norm=LogNorm(vmin=CF.min(), vmax=CF.max())) + fig.colorbar(a3, ax=axs[2+3*n], label='cost function') + # Plot gate parameters and metrics + text_str = 'Optimal parameters\n'+\ + f'gate: {q0} CZ_{direction}\n'+\ + f'$\\phi$: {gate_perf[0]:.2f} \t $L_1$: {gate_perf[1]*100:.1f}%\n' + if type(Dets) != type(None): + text_str += f'Detuning: {opt[0]*1e-6:.1f}MHz\n' + else: + text_str += f'A amp: {opt[0]:.4f}\n' + text_str += f'B amp: {opt[1]:.4f}' + # Add fit params + if 
type(fit_params) != type(None): + tp_factor, tmid, g, delta_0, det11_02, n_dist, a, b = fit_params + text_str += '\nFit params' + text_str += f'\n$t_p^\\mathrm{{factor}}$: {tp_factor:.3f}' + text_str += f'\n$t_\\mathrm{{mid}}$: {tmid*2.4e9:.3f} (#)' + text_str += f'\n$J_2/2\\pi$: {g*1e-6:.2f} MHz' + + props = dict(boxstyle='round', facecolor='white', alpha=1) + axs[2+3*n].text(1.45, 0.98, text_str, transform=axs[2+3*n].transAxes, fontsize=10, + verticalalignment='top', bbox=props, linespacing=1.6) + # Set axis labels and titles + for i in range(3): + axs[i+3*n].plot(opt[0], opt[1], 'o', mfc='white', mec='grey', mew=.5) + axs[i+3*n].set_xlabel('Amplitude') + axs[i+3*n].set_ylabel(r'B amplitude') + if type(Dets) != type(None): + set_xlabel(axs[i+3*n], f'{q0} detuning', unit='Hz') + if title: + fig.suptitle(title+'\n'+ts, y=1) + axs[0+3*n].set_title(f'Conditional phase {q0} {q1}') + axs[1+3*n].set_title(f'Missing fraction {q0} {q1}') + axs[2+3*n].set_title(f'Cost function {q0} {q1}') + else: + fig.suptitle(ts+f'\nQubits {q0} {q1}', y=.95, size=14) + axs[0].set_title(f'Conditional phase') + axs[1].set_title(f'Missing fraction') + axs[2].set_title(f'Cost function') + # Add qubit frequency axis and SNZ leakage fit contours + if type(Dets) != type(None): + # Add qubit frequency axis + axt0 = axs[0+3*n].twiny() + axt0.set_xlim((q0_freq-np.array(axs[0+3*n].get_xlim()))*1e-9) + axt0.set_xlabel(f'{q0} Frequency (GHz)') + axt1 = axs[1+3*n].twiny() + axt1.set_xlim((q0_freq-np.array(axs[1+3*n].get_xlim()))*1e-9) + axt1.set_xlabel(f'{q0} Frequency (GHz)') + axt2 = axs[2+3*n].twiny() + axt2.set_xlim((q0_freq-np.array(axs[2+3*n].get_xlim()))*1e-9) + axt2.set_xlabel(f'{q0} Frequency (GHz)') + # # This fit is not accurate ! 
+ # # Plot SNZ leakage fitting contours + # _X = np.linspace(X[0], X[-1], 201) + # _Y = np.linspace(Y[0], Y[-1], 201) + # _X, _Y = np.meshgrid(_X, _Y) + # # Get interpolated landscape from fit + # # fit params + # # print(fit_params) + # tp_factor, tmid, g, delta_0, det11_02, n_dist, a, b = fit_params + # Pop20, Pop11, Cphase = SNZ2(delta=_X, B_amp=_Y, + # tp=tp_factor/(4*g), + # tmid=tmid, + # g=g, + # delta_0=delta_0, + # det11_02=det11_02, + # n_dist=n_dist) + # for i in range(2): + # # Plot leakage contours + # for c, a_ in zip([.05, .2, .6, .8], [.5, .7, .85, 1]): + # axs[i+2*n].contour(_X, _Y, Pop20, [c], colors=['w'], + # linewidths=[1], linestyles=['--'], alpha=a_) + # # # Plot 180 cphase contours + # # CS = axs[i+2*n].contour(_X, _Y, Cphase, [180], colors=['w'], + # # linewidths=[1.5], linestyles=['--'], alpha=1) + # # axs[i+2*n].clabel(CS, CS.levels, inline=True, fmt='$%i^\\circ$', fontsize=10) + + # Plot 180 cphase contour + # unwrap phase so contour is correctly estimated + AUX = np.zeros(CP.shape) + for i in range(len(CP)): + AUX[i] = np.deg2rad(CP[i])*1 + AUX[i] = np.unwrap(AUX[i]) + AUX[i] = np.rad2deg(AUX[i]) + for i in range(len(CP[:,i])): + AUX[:,i] = np.deg2rad(AUX[:,i]) + AUX[:,i] = np.unwrap(AUX[:,i]) + AUX[:,i] = np.rad2deg(AUX[:,i]) + if type(Dets) != type(None): + _x_axis = Dets + else: + _x_axis = Amps + cs = axs[1+3*n].contour(_x_axis, Bamps, AUX, levels=[180, 180+360], + colors='white', linestyles='--') + # axs[1+3*n].clabel(cs, inline=True, fontsize=10, fmt='$180^o$') + fig.tight_layout() + +def VCZ_L1_contour_plotfn( + ax, + Amps, Bamps, + CP, MF, CF, + q0, q1, ts, + gate_perf, + opt, direction=None, + q0_freq=None, Dets=None, + title=None, **kw): + fig = ax.get_figure() + axs = fig.get_axes() + def get_plot_axis(vals, rang=None): + dx = vals[1]-vals[0] + X = np.concatenate((vals, [vals[-1]+dx])) - dx/2 + if rang: + X = X/np.max(vals) * (rang[1]-rang[0]) + rang[0] + return X + def get_contour_idxs(CP): + phase = 180 + if np.mean(CP) > 
300: + phase += 360 + idxs_i = [] + idxs_j = [] + for i in range(len(CP)): + idx = np.argmin(np.abs(CP[:,i]-phase)) + if np.abs(CP[idx, i]-phase) < 10: + idxs_i.append(i) + idxs_j.append(idx) + return idxs_i, idxs_j + ########################## + # Calculate contours + ########################## + # unwrap phase so contour is correctly estimated + AUX = np.zeros(CP.shape) + for i in range(len(CP)): + AUX[i] = np.deg2rad(CP[i])*1 + AUX[i] = np.unwrap(AUX[i]) + AUX[i] = np.rad2deg(AUX[i]) + for i in range(len(CP[:,i])): + AUX[:,i] = np.deg2rad(AUX[:,i]) + AUX[:,i] = np.unwrap(AUX[:,i]) + AUX[:,i] = np.rad2deg(AUX[:,i]) + idxs = get_contour_idxs(AUX) + # Plot versus transmon detuning + if type(Dets) != type(None): + X = get_plot_axis(Dets) + # Plot versus gain + else: + X = get_plot_axis(Amps) + Y = get_plot_axis(Bamps) + a1 = axs[0].pcolormesh(X, Y, MF, cmap='hot') + fig.colorbar(a1, ax=axs[0], label='missing fraction') + if type(Dets) != type(None): + _x_axis = Dets + else: + _x_axis = Amps + cs = axs[0].contour(_x_axis, Bamps, AUX, levels=[180, 180+360, 180+720], + colors='white', linestyles='--') + # axs[0].clabel(cs, inline=True, fontsize=10, fmt='$180^o$') + # Plot optimal points + axs[0].plot(opt[0], opt[1], 'o', mfc='white', mec='grey', mew=.5) + axs[1].axvline(opt[0], color='k', ls='--', alpha=.5) + axs[1].plot(_x_axis[idxs[0]], MF[idxs][::-1]/2*100) + # Set axis label and title + axs[0].set_xlabel('Amplitude') + axs[1].set_xlabel('Amplitude') + if type(Dets) != type(None): + set_xlabel(axs[0], f'{q0} detuning', unit='Hz') + set_xlabel(axs[1], f'{q0} detuning', unit='Hz') + axs[0].set_ylabel(r'B amplitude') + axs[1].set_ylabel(r'$L_1$ (%)') + # Add qubit frequency axis + if type(Dets) != type(None): + # Add qubit frequency axis + if q0_freq: + axt0 = axs[0].twiny() + axt0.set_xlim((q0_freq-np.array(axs[0].get_xlim()))*1e-9) + axt0.set_xlabel(f'{q0} Frequency (GHz)') + axt1 = axs[1].twiny() + axt1.set_xlim((q0_freq-np.array(axs[1].get_xlim()))*1e-9) + 
axt1.set_xlabel(f'{q0} Frequency (GHz)') + # Set title + fig.suptitle(ts+f'\nQubits {q0} {q1}', y=.9, size=14) + axs[0].set_title(f'Missing fraction') + axs[1].set_title(f'$L_1$ along contour') + fig.tight_layout() + + +class VCZ_flux_offset_sweep_Analysis(ba.BaseDataAnalysis): + """ + Analysis + """ + def __init__(self, + t_start: str = None, + t_stop: str = None, + label: str = '', + options_dict: dict = None, + extract_only: bool = False, + auto=True): + + super().__init__(t_start=t_start, + t_stop=t_stop, + label=label, + options_dict=options_dict, + extract_only=extract_only) + if auto: + self.run_analysis() + + def extract_data(self): + self.get_timestamps() + self.timestamp = self.timestamps[0] + + data_fp = get_datafilepath_from_timestamp(self.timestamp) + param_spec = {'data': ('Experimental Data/Data', 'dset'), + 'value_names': ('Experimental Data', 'attr:value_names')} + self.raw_data_dict = h5d.extract_pars_from_datafile( + data_fp, param_spec) + # Parts added to be compatible with base analysis data requirements + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] + + self.Q0 = self.raw_data_dict['folder'].split('_')[-3] + self.Q1 = self.raw_data_dict['folder'].split('_')[-2] + self.Q_parks = eval(self.raw_data_dict['folder'].split('_')[-1]) + + def process_data(self): + self.proc_data_dict = {} + # Sort data + Offset = self.raw_data_dict['data'][:,0] + self.proc_data_dict['Offset'] = Offset + CP = self.raw_data_dict['data'][:,1] + MF = self.raw_data_dict['data'][:,2] + self.proc_data_dict[f'CP'] = CP + self.proc_data_dict[f'MF'] = MF + # Fit data + self.qoi = {} + p_coef = np.polyfit(Offset, self.proc_data_dict[f'MF'], deg=2) + # Find minimum of leakage using derivative + p_func = np.poly1d(p_coef) + crit = p_func.deriv().roots + r_crit = crit[crit.imag==0].real + opt_offset = r_crit[0] + self.proc_data_dict[f'p_coef'] = p_coef + self.qoi[f'offset_opt'] = opt_offset + + def 
prepare_plots(self): + self.axs_dict = {} + self.figs[f'Offset_sweep_{self.Q0}_{self.Q1}'] = plt.figure(figsize=(8,3), dpi=100) + # self.figs[f'Offset_sweep_{self.Q0}_{self.Q1}'].patch.set_alpha(0) + axs = [self.figs[f'Offset_sweep_{self.Q0}_{self.Q1}'].add_subplot(121), + self.figs[f'Offset_sweep_{self.Q0}_{self.Q1}'].add_subplot(122)] + self.axs_dict[f'conditional_phase'] = axs[0] + self.axs_dict[f'missing_fraction'] = axs[1] + self.plot_dicts[f'Offset_sweep_{self.Q0}_{self.Q1}']={ + 'plotfn': Offset_sweep_plotfn, + 'ax_id': f'conditional_phase', + 'Offset' : self.proc_data_dict['Offset'], + 'CP' : self.proc_data_dict[f'CP'], + 'MF' : self.proc_data_dict[f'MF'], + 'p_coef' : self.proc_data_dict[f'p_coef'], + 'opt_offset' : self.qoi[f'offset_opt'], + 'q0' : self.Q0, 'q1' : self.Q1, + 'timestamp' : self.timestamp + } + + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', False): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + tag_tstamp=self.options_dict.get('tag_tstamp', True)) + +def Offset_sweep_plotfn( + ax, + Offset, + CP, MF, + p_coef, + opt_offset, + q0, q1, + timestamp, + **kw): + fig = ax.get_figure() + axs = fig.get_axes() + + axs[0].plot(Offset*1e3, CP, 'o') + axs[0].set_xlabel('Current offset (mA)') + axs[0].set_ylabel('Conditional phase (deg)') + lim = axs[0].get_ylim() + axs[0].set_ylim(lim[0]-10, lim[1]+10) + + p_func = np.poly1d(p_coef) + _offset = np.linspace(Offset[0], Offset[-1], 101) + axs[1].plot(_offset*1e3, p_func(_offset), 'C0--', label='Fit') + axs[1].plot(Offset*1e3, MF, 'C3o', label='data') + axs[1].axvline(opt_offset*1e3, color='k', ls='--', label=f'{opt_offset*1e3:.3f} mA') + axs[1].set_xlabel('Current offset (mA)') + axs[1].set_ylabel('Missing fration') + axs[1].legend(frameon=False, bbox_to_anchor=(1.01, 1), loc=2) + fig.suptitle(f'{timestamp}\nFlux offset sweep {q0} 
{q1}', y=1.0) + fig.tight_layout() + + +class VCZ_asymmetry_sweep_Analysis(ba.BaseDataAnalysis): + """ + Analysis + """ + def __init__(self, + t_start: str = None, + t_stop: str = None, + label: str = '', + options_dict: dict = None, + extract_only: bool = False, + auto=True): + + super().__init__(t_start=t_start, + t_stop=t_stop, + label=label, + options_dict=options_dict, + extract_only=extract_only) + if auto: + self.run_analysis() + + def extract_data(self): + self.get_timestamps() + self.timestamp = self.timestamps[0] + + data_fp = get_datafilepath_from_timestamp(self.timestamp) + param_spec = {'data': ('Experimental Data/Data', 'dset'), + 'value_names': ('Experimental Data', 'attr:value_names')} + self.raw_data_dict = h5d.extract_pars_from_datafile( + data_fp, param_spec) + # Parts added to be compatible with base analysis data requirements + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] + + self.Q0 = eval(self.raw_data_dict['folder'].split('_')[-3]) + self.Q1 = eval(self.raw_data_dict['folder'].split('_')[-2]) + self.Q_parks = eval(self.raw_data_dict['folder'].split('_')[-1]) + + def process_data(self): + self.proc_data_dict = {} + # Sort data + Asymmetry = self.raw_data_dict['data'][:,0] + self.proc_data_dict['Asymmetry'] = Asymmetry + for i, q0 in enumerate(self.Q0): + CP = self.raw_data_dict['data'][:,2*i+1] + MF = self.raw_data_dict['data'][:,2*i+2] + self.proc_data_dict[f'CP_{i}'] = CP + self.proc_data_dict[f'MF_{i}'] = MF + # Fit data + self.qoi = {} + for i, q0 in enumerate(self.Q0): + p_coef = np.polyfit(Asymmetry, + self.proc_data_dict[f'MF_{i}'], deg=2) + # Find minimum of leakage using derivative + p_func = np.poly1d(p_coef) + crit = p_func.deriv().roots + r_crit = crit[crit.imag==0].real + opt_asymmetry = r_crit[0] + self.proc_data_dict[f'p_coef_{i}'] = p_coef + self.qoi[f'asymmetry_opt_{i}'] = opt_asymmetry + + def prepare_plots(self): + self.axs_dict = {} + + for i, q0 in 
enumerate(self.Q0): + self.figs[f'Asymmetry_sweep_{q0}_{self.Q1[i]}'] = plt.figure(figsize=(8,3), dpi=100) + # self.figs[f'Asymmetry_sweep_{q0}_{self.Q1[i]}'].patch.set_alpha(0) + axs = [self.figs[f'Asymmetry_sweep_{q0}_{self.Q1[i]}'].add_subplot(121), + self.figs[f'Asymmetry_sweep_{q0}_{self.Q1[i]}'].add_subplot(122)] + self.axs_dict[f'conditional_phase_{i}'] = axs[0] + self.axs_dict[f'missing_fraction_{i}'] = axs[1] + self.plot_dicts[f'Asymmetry_sweep_{self.Q0[i]}_{self.Q1[i]}']={ + 'plotfn': Asymmetry_sweep_plotfn, + 'ax_id': f'conditional_phase_{i}', + 'Asymmetry' : self.proc_data_dict['Asymmetry'], + 'CP' : self.proc_data_dict[f'CP_{i}'], + 'MF' : self.proc_data_dict[f'MF_{i}'], + 'p_coef' : self.proc_data_dict[f'p_coef_{i}'], + 'opt_asymmetry' : self.qoi[f'asymmetry_opt_{i}'], + 'q0' : self.Q0[i], 'q1' : self.Q1[i], + 'timestamp' : self.timestamp + } + + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', False): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + tag_tstamp=self.options_dict.get('tag_tstamp', True)) + +def Asymmetry_sweep_plotfn( + ax, + Asymmetry, + CP, MF, + p_coef, + opt_asymmetry, + q0, q1, + timestamp, + **kw): + fig = ax.get_figure() + axs = fig.get_axes() + + axs[0].plot(Asymmetry*100, CP, 'o') + axs[0].set_xlabel('Pulse asymmetry (%)') + axs[0].set_ylabel('Conditional phase (deg)') + lim = axs[0].get_ylim() + axs[0].set_ylim(lim[0]-10, lim[1]+10) + + p_func = np.poly1d(p_coef) + _asym = np.linspace(Asymmetry[0], Asymmetry[-1], 101) + axs[1].plot(_asym*100, p_func(_asym), 'C0--', label='Fit') + axs[1].plot(Asymmetry*100, MF, 'C3o', label='data') + axs[1].axvline(opt_asymmetry*100, color='k', ls='--', label=f'${opt_asymmetry*100:.3f}$%') + axs[1].set_xlabel('Pulse asymmetry, (%)') + axs[1].set_ylabel('Missing fration') + axs[1].legend(frameon=False, bbox_to_anchor=(1.01, 1), 
loc=2) + + fig.suptitle(f'{timestamp}\nAsymmetry sweep {q0} {q1}', y=1.0) + fig.tight_layout() + + +def avoided_crossing_fit_func(x, J, alpha): + x_rad = x*2*np.pi + J_rad = J*2*np.pi + alpha_rad = alpha*2*np.pi + w_err = 2*J_rad**2/(x_rad-alpha_rad) + # rad_err = np.pi*w_err/(2*np.sqrt(2)*J_rad) + rad_err = w_err/(2*J_rad) + deg_err = rad_err*180/np.pi + return np.mod(deg_err+180 , 360) - 180 + +class Park_frequency_sweep_analysis(ba.BaseDataAnalysis): + """ + Analysis + """ + def __init__(self, + qH: str, + qL: str, + qP: str, + Parking_distances: list, + freq_qH: float = None, + alpha_qH: float = None, + t_start: str = None, + t_stop: str = None, + label: str = '', + options_dict: dict = None, + extract_only: bool = False, + auto=True): + super().__init__(t_start=t_start, + t_stop=t_stop, + label=label, + options_dict=options_dict, + extract_only=extract_only) + self.qH = qH + self.qL = qL + self.qP = qP + self.Parking_distances = Parking_distances + self.alpha_qH = alpha_qH + self.freq_qH = freq_qH + if auto: + self.run_analysis() + + def extract_data(self): + self.get_timestamps() + self.timestamp = self.timestamps[0] + data_fp = get_datafilepath_from_timestamp(self.timestamp) + param_spec = {'data': ('Experimental Data/Data', 'dset'), + 'value_names': ('Experimental Data', 'attr:value_names')} + self.raw_data_dict = h5d.extract_pars_from_datafile( + data_fp, param_spec) + # Parts added to be compatible with base analysis data requirements + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] + + def process_data(self): + self.proc_data_dict = {} + self.qoi = {} + # Sort data + Amps = self.raw_data_dict['data'][:,0] + # qH single qubit phases with qP in 0 or 1 ("_s") + Phi = self.raw_data_dict['data'][:,1] + Phi_s = self.raw_data_dict['data'][:,2] + Delta_phi = self.raw_data_dict['data'][:,3] + # Conditional phases between qH and qL with qP in 0 or 1 ("_s") + Phi_cond = self.raw_data_dict['data'][:,4] + 
Phi_cond_s = self.raw_data_dict['data'][:,5] + Delta_phi_cond = self.raw_data_dict['data'][:,6] + # Missing fraction of qL with qP in 0 or 1 ("_s") + Miss_frac = self.raw_data_dict['data'][:,7] + Miss_frac_s = self.raw_data_dict['data'][:,8] + Delta_miss_frac = self.raw_data_dict['data'][:,9] + # Fit avoided crossing + from scipy.optimize import curve_fit + _x = self.Parking_distances[30:]*1+0 + _y = Delta_phi_cond[30:]*1+0 + p0 = [600e6, 20e6, 20e6] + # popt, pcov = curve_fit(avoided_crossing_fit_func, _x, _y, + # p0 = p0, bounds=([ _x[0], 5e6, 5e6], + # [_x[-1], 50e6, 50e6]) + # ) + # print(pcov) + # print(popt) + popt = p0 + self.proc_data_dict['popt'] = popt + # Save data in processed data dict + self.proc_data_dict['Phi'] = Phi + self.proc_data_dict['Phi_s'] = Phi_s + self.proc_data_dict['Delta_phi'] = Delta_phi + self.proc_data_dict['Phi_cond'] = Phi_cond + self.proc_data_dict['Phi_cond_s'] = Phi_cond_s + self.proc_data_dict['Delta_phi_cond'] = Delta_phi_cond + self.proc_data_dict['Miss_frac'] = Miss_frac + self.proc_data_dict['Miss_frac_s'] = Miss_frac_s + self.proc_data_dict['Delta_miss_frac'] = Delta_miss_frac + + def prepare_plots(self): + self.axs_dict = {} + fig, axs = plt.subplots(figsize=(5,5), nrows=2, ncols=2, dpi=100) + axs = axs.flatten() + self.figs[f'Park_sweep_gate_{self.qH}_{self.qL}_park_{self.qP}'] = fig + self.axs_dict['plot_1'] = axs[0] + # fig.patch.set_alpha(0) + self.plot_dicts[f'Park_sweep_gate_{self.qH}_{self.qL}_park_{self.qP}']={ + 'plotfn': park_sweep_plotfn, + 'ax_id': 'plot_1', + 'qH': self.qH, + 'qL': self.qL, + 'qP': self.qP, + 'Parking_distances': self.Parking_distances, + 'Phi' : self.proc_data_dict['Phi'], + 'Phi_s' : self.proc_data_dict['Phi_s'], + 'Delta_phi' : self.proc_data_dict['Delta_phi'], + 'Phi_cond' : self.proc_data_dict['Phi_cond'], + 'Phi_cond_s' : self.proc_data_dict['Phi_cond_s'], + 'Delta_phi_cond' : self.proc_data_dict['Delta_phi_cond'], + 'Miss_frac' : self.proc_data_dict['Miss_frac'], + 'Miss_frac_s' : 
self.proc_data_dict['Miss_frac_s'], + 'Delta_miss_frac' : self.proc_data_dict['Delta_miss_frac'], + 'alpha_qH': self.alpha_qH, + 'popt': self.proc_data_dict['popt'], + 'timestamp': self.timestamps[0]} + + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', False): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + tag_tstamp=self.options_dict.get('tag_tstamp', True)) + +def park_sweep_plotfn( + ax, + qH, qL, qP, + Parking_distances, + Phi, Phi_s, Delta_phi, + Phi_cond, Phi_cond_s, Delta_phi_cond, + Miss_frac, Miss_frac_s, Delta_miss_frac, + timestamp, alpha_qH, popt, + **kw): + fig = ax.get_figure() + axs = fig.get_axes() + # Plot of single-qubit phase of qH + axs[0].plot(Parking_distances*1e-6, Phi_cond, 'C0.') + if alpha_qH: + axs[0].axvline(-alpha_qH*1e-6, ls='--', color='k', lw=1) + axs[0].text(-alpha_qH*1e-6, 180, f'$-\\alpha_{{{qH}}}$', + va='center', ha='center', size=8, + bbox=dict(boxstyle='round', facecolor='w', alpha=1, lw=0)) + axs[0].set_ylim(-90+180, 90+180) + axs[0].set_ylabel(f'$\\phi_\\mathrm{{cond}}^\\mathrm{{{qH},{qL}}}$ (deg)') + axs[0].axhline(180, ls='--', color='k', lw=1, alpha=.25, zorder=10) + # Plot of qH-qL conditional phase + axs[2].plot(Parking_distances*1e-6, Delta_phi, 'C0.') + if alpha_qH: + axs[2].axvline(-alpha_qH*1e-6, ls='--', color='k', lw=1) + axs[2].text(-alpha_qH*1e-6, 0, f'$-\\alpha_{{{qH}}}$', + va='center', ha='center', size=8, + bbox=dict(boxstyle='round', facecolor='w', alpha=1, lw=0)) + axs[2].set_ylim(-90, 90) + axs[2].set_ylabel(f'$\\delta \\phi_\\mathrm{{{qH}}}$ (deg)') + axs[2].set_xlabel(f'$\\Delta_\\mathrm{{{qH},{qP}}}$ (MHz)') + axs[2].axhline(0, ls='--', color='k', lw=1, alpha=.25, zorder=10) + # Plot of qH-qL conditional phase difference for different qP states + # axs[1].plot(Parking_distances*1e-6, + # avoided_crossing_fit_func(Parking_distances, 
*popt), 'k--') + axs[1].plot(Parking_distances*1e-6, Delta_phi_cond, 'C0.') + axs[1].set_ylim(-90, 90) + axs[1].set_ylabel('$\\delta \\phi_\\mathrm{{cond}}$ (deg)') + axs[1].axhline(0, ls='--', color='k', lw=1, alpha=.25, zorder=10) + # Plot of Missing fractions + axs[3].plot(Parking_distances*1e-6, Miss_frac/2, 'C0-', alpha=.25, label='$L_{{1_{{|0\\rangle_P}}}}$') + axs[3].plot(Parking_distances*1e-6, Miss_frac_s/2, 'C3-', alpha=.25, label='$L_{{1_{{|1\\rangle_P}}}}$') + axs[3].plot(Parking_distances*1e-6, np.abs(Delta_miss_frac)/2, 'C0.') + axs[3].set_xlabel(f'$\\Delta_\\mathrm{{{qH},{qP}}}$ (MHz)') + axs[3].set_ylabel('$|\\delta L_1|$') + axs[3].legend(frameon=False) + # twin axes for qL-qP detuning + ax0 = axs[0].twiny() + ax0.set_xlim(np.array(axs[0].get_xlim())-300) + ax0.set_xlabel(f'$\\Delta_\\mathrm{{{qL},{qP}}}$ (MHz)') + ax1 = axs[1].twiny() + ax1.set_xlim(np.array(axs[1].get_xlim())-300) + ax1.set_xlabel(f'$\\Delta_\\mathrm{{{qL},{qP}}}$ (MHz)') + ax2 = axs[2].twiny() + ax2.set_xlim(np.array(axs[2].get_xlim())) + ax2.set_xticklabels([]) + ax3 = axs[3].twiny() + ax3.set_xlim(np.array(axs[3].get_xlim())) + ax3.set_xticklabels([]) + # Adjust positions of axis + pos = axs[0].get_position() + axs[0].set_position([pos.x0, pos.y0, pos.width, pos.height]) + pos = axs[1].get_position() + axs[1].set_position([pos.x0+.1, pos.y0, pos.width, pos.height]) + pos = axs[2].get_position() + axs[2].set_position([pos.x0, pos.y0+.02, pos.width, pos.height]) + pos = axs[3].get_position() + axs[3].set_position([pos.x0+.1, pos.y0+.02, pos.width, pos.height]) + axs[0].set_xticklabels([]) + axs[1].set_xticklabels([]) + # Drawing of two-qubit gate scheme + from matplotlib.patches import Circle + ax = fig.add_subplot(221) + pos = ax.get_position() + ax.set_position([pos.x0+pos.width*(1-.425*1.1-.05), pos.y0+pos.height*(1-.45*1.1+.03), + pos.width*.425*1.1, pos.height*.45*1.1]) + patch = Circle((0, 0.5), radius=.3, color='C0', lw=1, ec='k') + ax.add_patch(patch) + patch = 
Circle((0.75, -0.5), radius=.3, color='C0', lw=1, ec='k') + ax.add_patch(patch) + patch = Circle((-0.75, -0.5), radius=.3, color='C3', lw=1, ec='k') + ax.add_patch(patch) + ax.plot([0, .75], [.5, -.5], c='k', zorder=-1, lw=3) + ax.plot([0, -.75], [.5, -.5], c='k', zorder=-1, lw=3, ls=(.1,(1,.5)), alpha=.5) + ax.text(0, .5, qH, va='center', ha='center', color='w') + ax.text(.75, -.5, qL, va='center', ha='center', color='w') + ax.text(-.75, -.5, qP, va='center', ha='center', color='w') + ax.set_xlim(-1.1,1.1) + ax.set_ylim(-1.1,1.1) + ax.axis('off') + # Title + fig.suptitle(f'{timestamp}\nPark sweep {qP} gate {qH},{qL}', y=1.075) + + +def convert_amp_to_freq(poly_coefs, ch_range, ch_amp, dac_amp): + ''' + Helper function to convert flux pulse amp to frequency detuning. + ''' + poly_func = np.poly1d(poly_coefs) + out_volt = dac_amp*ch_amp*ch_range/2 + freq_det = poly_func(out_volt) + return freq_det + +def vcz_waveform(sampling_rate, + amp_at_int_11_02, + norm_amp_fine, + amp_pad, + amp_pad_samples, + asymmetry, + time_sqr, + time_middle, + time_pad, + use_asymmety, + use_net_zero_pulse, + ): + ''' + Trace SNZ waveform. + ''' + amp_at_sweetspot = 0.0 + dt = 1 + norm_amp_sq = 1 + time_sqr = time_sqr * sampling_rate + time_middle = time_middle * sampling_rate + time_pad = time_pad * sampling_rate + # This is to avoid numerical issues when the user would run sweeps with + # e.g. `time_at_swtspt = np.arange(0/2.4e9, 10/ 2.4e9, 2/2.4e9)` + # instead of `time_at_swtspt = np.arange(0, 42, 2) / 2.4e9` and get + # bad results for specific combinations of parameters + time_middle = np.round(time_middle / dt) * dt + time_sqr = np.round(time_sqr / dt) * dt + time_pad = np.round(time_pad / dt) * dt + # build padding part of waveform + pad_amps = np.full(int(time_pad / dt), 0) + amp_pad*2 + for _i in range(len(pad_amps)): + if _i3, \ + 'Not enough time steps in Chevron\nTrying other timestamp...' 
+ self.TLS_analysis[q] = a + break + except: + print_exception() + except: + print_exception() + print(f'No valid TLS landscape data found for {q}') + # save data in raw data dictionary + self.raw_data_dict = data + # Parts added to be compatible with base analysis data requirements + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] + + def process_data(self): + data = self.raw_data_dict + self.proc_data_dict = {q : {} for q in self.Qubits} + for q in self.Qubits: + self.proc_data_dict[q]['frequency'] = data[q]['frequency'] + self.proc_data_dict[q]['anharmonicity'] = data[q]['anharmonicity'] + # estimate detunings at each amplitude + for d in ['NW', 'NE', 'SW', 'SE']: + # Trace CZ waveform + _wf = vcz_waveform( + sampling_rate = 2.4e9, + amp_at_int_11_02 = data[q][f'amp_{d}'], + norm_amp_fine = data[q][f'B_amp_{d}'], + amp_pad = data[q][f'amp_pad_{d}'], + amp_pad_samples = data[q][f'amp_pad_samples_{d}'], + asymmetry = data[q][f'asymmetry_{d}'], + time_sqr = data[q][f'tp_{d}'], + time_middle = data[q][f'tmid_{d}'], + time_pad = data[q][f'tpad_{d}'], + use_asymmety = data[q][f'use_asymmetry_{d}'], + use_net_zero_pulse = data[q][f'use_net_zero_pulse_{d}']) + self.proc_data_dict[q][f'cz_waveform_{d}'] = _wf + # Convert CZ waveform into frequency trajectory + _Ftrajectory = -convert_amp_to_freq(data[q]['poly_coefs'], + data[q]['ch_range'], + data[q]['ch_amp'], _wf) + _Ftrajectory += data[q]['frequency'] + self.proc_data_dict[q][f'cz_freq_trajectory_{d}'] = _Ftrajectory + # Parking trajectories + _wf = gen_park(sampling_rate = 2.4e9, + park_length = data[q]['t_park'], + park_pad_length = data[q]['tpad_park'], + park_amp = data[q]['park_amp'], + park_double_sided = data[q]['park_double_sided']) + self.proc_data_dict[q]['park_waveform'] = _wf + _Ftrajectory = -convert_amp_to_freq(data[q]['poly_coefs'], + data[q]['ch_range'], + data[q]['ch_amp'], _wf) + _Ftrajectory += data[q]['frequency'] + 
self.proc_data_dict[q]['park_freq_trajectory'] = _Ftrajectory + # Idling trajectory + n_points = len(_Ftrajectory) + self.proc_data_dict[q]['idle_freq_trajectory'] = np.full(n_points, data[q]['frequency']) + + def prepare_plots(self): + self.axs_dict = {} + for qH, qL in self.Qubit_pairs: + + fig, ax = plt.subplots(figsize=(4,4), dpi=100) + self.figs[f'{qH}_{qL}_Gate_frequency_trajectory'] = fig + self.axs_dict[f'plot_{qH}_{qL}'] = ax + # fig.patch.set_alpha(0) + self.plot_dicts[f'{qH}_{qL}_Gate_frequency_trajectory']={ + 'plotfn': CZ_frequency_trajectory_plotfn, + 'ax_id': f'plot_{qH}_{qL}', + 'data': self.proc_data_dict, + 'qH': qH, + 'qL': qL, + + 'TLS_analysis_dict': self.TLS_analysis, + 'timestamp': self.timestamps[0]} + + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', False): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + tag_tstamp=self.options_dict.get('tag_tstamp', True)) + +def CZ_frequency_trajectory_plotfn( + ax, + data, qH, qL, + timestamp, + TLS_analysis_dict, + include_TLS_landscape=True, + **kw): + fig = ax.get_figure() + # Compile all relevant freq. 
trajectories + directions = get_gate_directions(qH, qL) + parked_qubits = get_parking_qubits(qH, qL) + wf = { qH: f'cz_freq_trajectory_{directions[0]}', + qL: f'cz_freq_trajectory_{directions[1]}' } + for q in parked_qubits: + wf[q] = 'park_freq_trajectory' + # Draw CZ trajectories + for q, _wf in wf.items(): + if q in parked_qubits: + ax.plot(data[q][_wf]*1e-9, '--', markersize=3, lw=1, label=f'{q}') + else: + ax.plot(data[q][_wf]*1e-9, '.-', markersize=3, lw=1, label=f'{q}') + # if q == qH: # plot 02 level + # ax.plot((data[q][_wf]+data[q]['anharmonicity'])*1e-9, 'C0.-', + # alpha=.5, markersize=3, lw=1, label=f'{q}') + # labels + ax.text(5, data[q][_wf][5]*1e-9+.015, f'{q}') + # settings of plot + ax.set_title(f'{timestamp}\n{qH}, {qL} Gate') + ax.set_ylabel('Frequency (GHz)') + ax.set_xlabel('Time (# samples)') + ax.grid(ls='--', alpha=.5) + # Side plots for TLS landscapes + if include_TLS_landscape: + axR = fig.add_subplot(111) + pos = axR.get_position() + axR.set_position([pos.x0+pos.width*1.005, pos.y0, pos.width*0.2, pos.height]) + def get_plot_axis(vals, rang=None): + if len(vals)>1: + n = len(vals)//2 + dx = vals[n]-vals[n-1] + X = np.concatenate((vals, [vals[-1]+dx])) - dx/2 + else: + X = vals + return X + Detunings = data[qH]['frequency'] - get_plot_axis(TLS_analysis_dict[qH].proc_data_dict['Detunings']) + Times = get_plot_axis(TLS_analysis_dict[qH].proc_data_dict['Times']) + Pop = TLS_analysis_dict[qH].proc_data_dict['Pop'] + # Frequency qubit population + vmax = min([1, np.max(Pop)]) + vmax = 1#max([vmax, 0.15]) + im = axR.pcolormesh(Times*1e9, Detunings*1e-9, Pop.transpose(), vmax=vmax) + axR.text(Times[len(Times)//2]*1e9, Detunings[0]*1e-9-.05, qH, ha='center', va='top', color='w') + axR.set_title('High qubit', size=7) + if qL in TLS_analysis_dict.keys(): + axL = fig.add_subplot(221) + # using previous axis position + axL.set_position([pos.x0+pos.width*(1.21), pos.y0, + pos.width*0.2, pos.height]) + Detunings = data[qL]['frequency'] - 
get_plot_axis(TLS_analysis_dict[qL].proc_data_dict['Detunings']) + Pop = TLS_analysis_dict[qL].proc_data_dict['Pop'] + # Frequency qubit population + vmax = min([1, np.max(Pop)]) + vmax = 1#max([vmax, 0.15]) + im = axL.pcolormesh(Times*1e9, Detunings*1e-9, Pop.transpose(), vmax=vmax) + axL.text(Times[len(Times)//2]*1e9, max(Detunings)*1e-9-.05, qL, ha='center', va='top', color='w') + # axR.axhline(max(Detunings)*1e-9, color='w') + axL.set_title('Low qubit', size=7) + axL.set_ylim(ax.get_ylim()) + axL.yaxis.tick_right() + axL.set_xticks([]) + axL.axis('off') + axR.set_ylim(ax.get_ylim()) + axR.yaxis.tick_right() + axR.set_xticks([]) + axR.axis('off') + # Parked qubit plots + i = 1 + for q in parked_qubits: + if q in TLS_analysis_dict.keys(): + axP = fig.add_subplot(221+i) + # using previous axis position + axP.set_position([pos.x0+pos.width*(1.21 + i*.205), pos.y0, + pos.width*0.2, pos.height]) + + Detunings = data[q]['frequency'] - get_plot_axis(TLS_analysis_dict[q].proc_data_dict['Detunings']) + Pop = TLS_analysis_dict[q].proc_data_dict['Pop'] + # Frequency qubit population + vmax = min([1, np.max(Pop)]) + vmax = 1#max([vmax, 0.15]) + im = axP.pcolormesh(Times*1e9, Detunings*1e-9, Pop.transpose(), vmax=vmax) + axP.text(Times[len(Times)//2]*1e9, max(Detunings)*1e-9-.05, q, ha='center', va='top', color='w') + axP.set_title('Park qubit', size=7) + axP.set_ylim(ax.get_ylim()) + axP.yaxis.tick_right() + axP.set_xticks([]) + axP.axis('off') + i += 1 + + +class Parity_check_ramsey_analysis(ba.BaseDataAnalysis): + """ + Analysis + """ + def __init__(self, + Q_target, + Q_control, + Q_spectator, + control_cases, + angles, + solve_for_phase_gate_model:bool = False, + t_start: str = None, + t_stop: str = None, + label: str = '', + options_dict: dict = None, + extract_only: bool = False, + auto=True): + + super().__init__(t_start=t_start, + t_stop=t_stop, + label=label, + options_dict=options_dict, + extract_only=extract_only) + self.Q_target = Q_target + self.Q_control = 
Q_control + self.Q_spectator = Q_spectator + self.control_cases = control_cases + self.angles = angles + self.solve_for_phase_gate_model = solve_for_phase_gate_model + if auto: + self.run_analysis() + + def extract_data(self): + self.get_timestamps() + self.timestamp = self.timestamps[0] + + data_fp = get_datafilepath_from_timestamp(self.timestamp) + param_spec = {'data': ('Experimental Data/Data', 'dset'), + 'value_names': ('Experimental Data', 'attr:value_names')} + self.raw_data_dict = h5d.extract_pars_from_datafile( + data_fp, param_spec) + # Parts added to be compatible with base analysis data requirements + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] + + def process_data(self): + self.proc_data_dict = {} + self.qoi = {} + # Processing + n = len(self.Q_target+self.Q_control) + detector_list = [ name.decode().split(' ')[-1] for name in + self.raw_data_dict['value_names']] + calibration_points = ['{:0{}b}'.format(i, n) for i in range(2**n)] + self.calibration_points = calibration_points + Ramsey_curves = {} + Cal_points = {} + for k, q in enumerate(self.Q_target+self.Q_control): + # Sort raw data + q_idx = detector_list.index(q) + Cal_points_raw = self.raw_data_dict['data'][-len(calibration_points):,1+q_idx] + Ramsey_curves_raw = { case : self.raw_data_dict['data'][i*len(self.angles):(i+1)*len(self.angles),1+q_idx]\ + for i, case in enumerate(self.control_cases) } + # Sort and calculate calibration point levels + selector = np.tile(np.concatenate([np.zeros(2**(n-k-1)), + np.ones(2**(n-k-1))]), 2**(k)) + Cal_0 = np.mean(Cal_points_raw[~np.ma.make_mask(selector)]) + Cal_1 = np.mean(Cal_points_raw[np.ma.make_mask(selector)]) + # Convert to probability + Cal_points[q] = (Cal_points_raw-Cal_0)/(Cal_1-Cal_0) + Ramsey_curves[q] = { case : (Ramsey_curves_raw[case]-Cal_0)/(Cal_1-Cal_0)\ + for case in self.control_cases } + + # Fit phases + from scipy.optimize import curve_fit + def func(x, phi, A, B): + 
return A*(np.cos( (x+phi)/360 *2*np.pi )+1)/2 + B + Fit_res = { q : {} for q in self.Q_target} + for q in self.Q_target: + for case in self.control_cases: + # print(Ramsey_curves[q][case]) + popt, pcov = curve_fit(func, self.angles, Ramsey_curves[q][case], + p0 = [90, .9, 0], + bounds=[(-100, 0, -np.inf), (300, np.inf, np.inf)]) + Fit_res[q][case] = popt + + # Missing fraction + P_excited = {} + Missing_fraction = {} + L_0 = {} + L_1 = {} + L_2 = {} + n_c = len(self.Q_control) + for i, q in enumerate(self.Q_control): + P_excited[q] = { case : np.mean(Ramsey_curves[q][case]) for case in self.control_cases } + L_0[q] = [] + L_1[q] = [] + L_2[q] = [] + for case in self.control_cases: + if case[i] == '0': + L_0[q].append( P_excited[q][case] ) + elif case[i] == '1': + L_1[q].append( P_excited[q][case] ) + elif case[i] == '2': + L_2[q].append( P_excited[q][case] ) + else: + raise(f'Control case {case} not valid.') + L_0[q] = np.mean(L_0[q]) + L_1[q] = np.mean(L_1[q]) + L_2[q] = np.mean(L_2[q]) + Missing_fraction[q] = L_1[q]-L_0[q] + + # Solve for Phase gate model + Phase_model = {} + if self.solve_for_phase_gate_model: + for q in self.Q_target: + n_c = len(self.Q_control) + Phase_vec = np.array([Fit_res[q][c][0] for c in self.control_cases]) + if self.Q_spectator: + n_spec = len(self.Q_spectator) + Phase_model[q] = get_phase_model_values(n_c, Phase_vec, n_spec) + else: + Phase_model[q] = get_phase_model_values(n_c, Phase_vec) + self.proc_data_dict['Ramsey_curves'] = Ramsey_curves + self.proc_data_dict['Cal_points'] = Cal_points + self.proc_data_dict['Fit_res'] = Fit_res + self.proc_data_dict['P_excited'] = P_excited + self.proc_data_dict['L_0'] = L_0 + self.proc_data_dict['L_1'] = L_1 + self.proc_data_dict['Missing_fraction'] = Missing_fraction + + self.qoi['Missing_fraction'] = Missing_fraction + # self.qoi['L_0'] = L_0 + # self.qoi['L_1'] = L_1 + self.qoi['P_excited'] = P_excited + self.qoi['Phases'] = {} + self.qoi['Contrast'] = {} + for q in self.Q_target: + 
self.qoi['Phases'][q] = { c:Fit_res[q][c][0] for c in self.control_cases } + self.qoi['Contrast'][q] = { c:Fit_res[q][c][1] for c in self.control_cases } + if self.solve_for_phase_gate_model: + self.qoi['Phase_model'] = Phase_model + + def prepare_plots(self): + self.axs_dict = {} + + Q_total = self.Q_target+self.Q_control + n = len(Q_total) + fig, axs = plt.subplots(figsize=(7,2*n), nrows=n, sharex=True, dpi=100) + self.figs[f'Parity_check_Ramsey_{"_".join(Q_total)}'] = fig + self.axs_dict[f'plot_1'] = axs[0] + # fig.patch.set_alpha(0) + + self.plot_dicts[f'Parity_check_Ramsey_{"_".join(Q_total)}']={ + 'plotfn': Ramsey_curves_plotfn, + 'ax_id': f'plot_1', + 'Q_target': self.Q_target, + 'Q_control': self.Q_control, + 'angles': self.angles, + 'calibration_points': self.calibration_points, + 'control_cases': self.control_cases, + 'Ramsey_curves': self.proc_data_dict['Ramsey_curves'], + 'Cal_points': self.proc_data_dict['Cal_points'], + 'Fit_res': self.proc_data_dict['Fit_res'], + 'L_0': self.proc_data_dict['L_0'], + 'L_1': self.proc_data_dict['L_1'], + 'Missing_fraction': self.proc_data_dict['Missing_fraction'], + 'timestamp': self.timestamps[0]} + + for i, q in enumerate(self.Q_target): + Q_total = [q]+self.Q_control + fig, axs = plt.subplots(figsize=(9,4), ncols=2, dpi=100) + self.figs[f'Parity_check_phases_{"_".join(Q_total)}'] = fig + self.axs_dict[f'plot_phases_{i}'] = axs[0] + + self.plot_dicts[f'Parity_check_phases_{"_".join(Q_total)}']={ + 'plotfn': Phases_plotfn, + 'ax_id': f'plot_phases_{i}', + 'q_target': q, + 'Q_control': self.Q_control, + 'Q_spectator': self.Q_spectator, + 'control_cases': self.control_cases, + 'Phases': self.qoi['Phases'], + 'timestamp': self.timestamps[0]} + + n = len(self.Q_control) + fig, axs = plt.subplots(figsize=(5,2*n), nrows=n, sharex=True, dpi=100) + if type(axs) != np.ndarray: + axs = [axs] + self.figs[f'Parity_check_missing_fraction_{"_".join(Q_total)}'] = fig + self.axs_dict[f'plot_3'] = axs[0] + 
self.plot_dicts[f'Parity_check_missing_fraction_{"_".join(Q_total)}']={ + 'plotfn': Missing_fraction_plotfn, + 'ax_id': f'plot_3', + 'Q_target': self.Q_target, + 'Q_control': self.Q_control, + 'P_excited': self.proc_data_dict['P_excited'], + 'control_cases': self.control_cases, + 'timestamp': self.timestamps[0]} + + if self.solve_for_phase_gate_model: + for i, q in enumerate(self.Q_target): + Q_total = [q]+self.Q_control + fig, ax = plt.subplots(figsize=(5,4)) + self.figs[f'Phase_gate_model_{"_".join(Q_total)}'] = fig + self.axs_dict[f'plot_phase_gate_{i}'] = ax + self.plot_dicts[f'Phase_gate_model_{"_".join(Q_total)}']={ + 'plotfn': Phase_model_plotfn, + 'ax_id': f'plot_phase_gate_{i}', + 'q_target': q, + 'Q_control': self.Q_control, + 'Q_spectator': self.Q_spectator, + 'Phase_model': self.qoi['Phase_model'][q], + 'timestamp': self.timestamps[0]} + + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', False): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + tag_tstamp=self.options_dict.get('tag_tstamp', True)) + +def Ramsey_curves_plotfn( + ax, + Q_target, + Q_control, + angles, + calibration_points, + control_cases, + Ramsey_curves, + Cal_points, + Fit_res, + L_0, + L_1, + Missing_fraction, + timestamp, + **kw): + fig = ax.get_figure() + axs = fig.get_axes() + + def func(x, phi, A, B): + return A*(np.cos( (x+phi)/360 *2*np.pi )+1)/2 + B + + cal_ax = np.arange(len(calibration_points))*10+360 + if len(control_cases) == 2: + Colors = { case : color for color, case in zip(['C2', 'C3'],control_cases)} + else: + from matplotlib.cm import hsv + # Attempts to create distinguishable colors, using hsv color mapping [0, 1]. 
+ Colors = { case : hsv(x) for x, case in zip(np.linspace(0,1 - 1/len(control_cases),len(control_cases)), control_cases)} + for i, q in enumerate(Q_target+Q_control): + for case in control_cases: + if q in Q_target: + _case_str = '' + for j, _q in enumerate(Q_control): + _case_str += f'{case[j]}_'+'{'+_q+'}' + _label = '$|'+_case_str+rf'\rangle$ : {Fit_res[q][case][0]:.1f}' + _angles = np.linspace(angles[0], angles[-1], 101) + axs[i].plot(_angles, func(_angles, *Fit_res[q][case]), + '--', color=Colors[case], alpha=1 if len(control_cases)==2 else .5, + label=_label) + axs[i].plot(angles, Ramsey_curves[q][case], + '.', color=Colors[case], alpha=1 if len(control_cases)==2 else .5) + axs[i].plot(cal_ax, Cal_points[q], 'C0.-') + axs[i].legend(frameon=False, bbox_to_anchor=(1.04,1), loc="upper left") + if q in Q_control: + axs[i].plot([angles[0], angles[-1]], [L_0[q], L_0[q]], 'k--') + axs[i].plot([angles[0], angles[-1]], [L_1[q], L_1[q]], 'k--', + label = f'Missing fac. : {Missing_fraction[q]*100:.1f} %') + axs[i].legend(loc=2, frameon=False) + axs[i].set_ylabel(f'Population {q}') + axs[-1].set_xticks(np.arange(0, 360, 60)) + axs[-1].set_xlabel('Phase (deg), calibration points') + axs[0].set_title(f'{timestamp}\nParity check ramsey '+\ + f'{" ".join(Q_target)} with control qubits {" ".join(Q_control)}') + +def Phases_plotfn( + ax, + q_target, + Q_control, + Q_spectator, + control_cases, + Phases, + timestamp, + **kw): + fig = ax.get_figure() + axs = fig.get_axes() + # Sort control cases by number of excitations + # and get ideal phases vector "vec" + if Q_spectator: + n_spec = len(Q_spectator) + cases_sorted = [control_cases[0], control_cases[1]] + vec = [0, 0] + for n in range(len(control_cases[0])): + for c in control_cases: + if c[:-n_spec].count('1') == n+1: + cases_sorted.append(c) + vec.append(180*np.mod(n+1%2,2)) + else: + cases_sorted = [control_cases[0]] + vec = [0] + for n in range(len(control_cases[0])): + for c in control_cases: + if c.count('1') == n+1: + 
cases_sorted.append(c) + vec.append(180*np.mod(n+1%2,2)) + # Phase error vector + q = q_target + phase_err_sorted = np.array([Phases[q][c] for c in cases_sorted])-np.array(vec) + + axs[0].plot(cases_sorted, np.zeros(len(cases_sorted))+180, 'k--') + axs[0].plot(cases_sorted, np.zeros(len(cases_sorted)), 'k--') + axs[0].plot(cases_sorted, [Phases[q][c] for c in cases_sorted], 'o-') + axs[0].set_xticks(axs[0].get_xticks()) + axs[0].set_xticklabels([fr'$|{c}\rangle$' for c in cases_sorted], rotation=90, fontsize=7) + axs[0].set_yticks([0, 45, 90, 135, 180]) + axs[0].set_xlabel(fr'Control qubit states $|${",".join(Q_control)}$\rangle$') + axs[0].set_ylabel(f'{q_target} Phase (deg)') + axs[0].grid(ls='--') + + axs[1].bar(cases_sorted, phase_err_sorted, zorder=10) + axs[1].grid(ls='--', zorder=-10) + axs[1].set_xticks(axs[1].get_xticks()) + axs[1].set_xticklabels([fr'$|{c}\rangle$' for c in cases_sorted], rotation=90, fontsize=7) + axs[1].set_xlabel(fr'Control qubit states $|${",".join(Q_control)}$\rangle$') + axs[1].set_ylabel(f'{q_target} Phase error (deg)') + fig.suptitle(f'{timestamp}\nParity check ramsey '+\ + f'{q_target} with control qubits {" ".join(Q_control)}', y=1.0) + fig.tight_layout() + +def Missing_fraction_plotfn( + ax, + Q_target, + Q_control, + P_excited, + control_cases, + timestamp, + **kw): + fig = ax.get_figure() + axs = fig.get_axes() + + for i, q in enumerate(Q_control): + axs[i].plot([P_excited[q][case]*100 for case in control_cases], 'C0o-') + + axs[i].grid(ls='--') + axs[i].set_xticks(np.arange(len(control_cases))) + axs[i].set_xticklabels([fr'$|{c}\rangle$' for c in control_cases], rotation=90) + + axs[-1].set_xlabel(fr'Control qubit states $|${",".join(Q_control)}$\rangle$') + axs[i].set_ylabel(f'$P_\{"mathrm{exc}"}$ {q} (%)') + + axs[0].set_title(f'{timestamp}\nParity check ramsey '+\ + f'{" ".join(Q_target)} with control qubits {" ".join(Q_control)}') + +def get_phase_model_values(n, Phase_vec, n_spec=None): + # Get Operator matrix 
dictionary + I = np.array([[1, 0], + [0, 1]]) + Z = np.array([[1, 0], + [0,-1]]) + Operators = {} + for s in ['{:0{}b}'.format(i, n) for i in range(2**n)]: + op_string = '' + op_matrix = 1 + for i in s: + if i == '0': + op_string += 'I' + op_matrix = np.kron(op_matrix, I) + else: + op_string += 'Z' + op_matrix = np.kron(op_matrix, Z) + Operators[op_string] = op_matrix + # Calculate M matrix + M = np.zeros((2**n,2**n)) + for i, Op in enumerate(Operators.values()): + for j in range(2**n): + # create state vector + state = np.zeros((1,2**n)) + state[0][j] = 1 + M[i, j] = np.dot(state, np.dot(Op, state.T)) + # Get ideal phase vector + states = ['{:0{}b}'.format(i, n) for i in range(2**n)] + if n_spec: + Phase_vec_ideal = np.array([s[:-n_spec].count('1')*180 for s in states]) + else: + Phase_vec_ideal = np.array([s.count('1')*180 for s in states]) + ######################################## + # Correct rotations for modulo of phase + ######################################## + state_idxs_sorted_by_exc = {i:[] for i in range(n+1)} + for i, s in enumerate(states): + if n_spec: + nr_exc = s[:-n_spec].count('1') + else: + nr_exc = s.count('1') + state_idxs_sorted_by_exc[nr_exc].append(i) + for i in range(n): + phi_0 = Phase_vec[state_idxs_sorted_by_exc[i][0]] + for idx in state_idxs_sorted_by_exc[i+1]: + while Phase_vec[idx] < phi_0: + Phase_vec[idx] += 360 + # Calculate Phase gate model coefficients + M_inv = np.linalg.inv(M) + vector_ideal = np.dot(M_inv, Phase_vec_ideal) + vector = np.dot(M_inv, Phase_vec) + + Result = {op:vector[i]-vector_ideal[i] for i, op in enumerate(Operators.keys())} + + return Result + +def Phase_model_plotfn( + ax, + q_target, + Q_control, + Q_spectator, + Phase_model, + timestamp, + **kw): + fig = ax.get_figure() + axs = fig.get_axes() + + Ops = np.array([ op for op in Phase_model.keys() ]) + + if Q_spectator: + n_spec = len(Q_spectator) + Ops_sorted = [Ops[0], Ops[1]] + Phases_sorted = [Phase_model[Ops[0]], Phase_model[Ops[1]]] + for n in 
range(len(Ops[0])): + for c in Ops: + if c[:-n_spec].count('Z') == n+1: + Ops_sorted.append(c) + Phases_sorted.append(Phase_model[c]) + else: + Ops_sorted = [Ops[0]] + Phases_sorted = [Phase_model[Ops[0]]] + for n in range(len(Ops[0])): + for c in Ops: + if c.count('Z') == n+1: + Ops_sorted.append(c) + Phases_sorted.append(Phase_model[c]) + + axs[0].bar(Ops_sorted, Phases_sorted, color='C0', zorder=10) + axs[0].set_xticks(Ops_sorted) + axs[0].set_xticklabels(Ops_sorted, rotation=90, fontsize=7) + axs[0].set_xlabel('Operator $U_{'+fr'{"}U_{".join(Q_control)}'+'}$') + axs[0].set_ylabel(f'Phase model coefficient error (deg)') + axs[0].grid(ls='--', zorder=0) + fig.suptitle(f'{timestamp}\nPhase gate model coefficients\n'+\ + f'{q_target} with control qubits {" ".join(Q_control)}', y=1.0) + fig.tight_layout() + + +class Parity_check_calibration_analysis(ba.BaseDataAnalysis): + """ + Analysis + """ + def __init__(self, + Q_ancilla: list, + Q_control: list, + Q_pair_target: list, + B_amps: list, + t_start: str = None, + t_stop: str = None, + label: str = '', + options_dict: dict = None, + extract_only: bool = False, + auto=True): + + super().__init__(t_start=t_start, + t_stop=t_stop, + label=label, + options_dict=options_dict, + extract_only=extract_only) + self.Q_ancilla = Q_ancilla + self.Q_control = Q_control + self.Q_pair_target = Q_pair_target + self.B_amps = B_amps + if auto: + self.run_analysis() + + def extract_data(self): + self.get_timestamps() + self.timestamp = self.timestamps[0] + + data_fp = get_datafilepath_from_timestamp(self.timestamp) + param_spec = {'data': ('Experimental Data/Data', 'dset'), + 'value_names': ('Experimental Data', 'attr:value_names')} + self.raw_data_dict = h5d.extract_pars_from_datafile( + data_fp, param_spec) + # Parts added to be compatible with base analysis data requirements + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] + + def process_data(self): + 
self.proc_data_dict = {} + self.qoi = {} + + n_c = len(self.Q_control) + Operators = [ name.decode()[-n_c:] for name in self.raw_data_dict['value_names']\ + if 'Phase_model' in name.decode() ] + Phases = self.raw_data_dict['data'][:,1:-n_c] + for q in self.Q_pair_target: + if q in self.Q_control: + q_idx = self.Q_control.index(q) + # Sort phases + Operators_sorted = [] + idx_sorted = [] + for n in range(len(Operators)+1): + for i, op in enumerate(Operators): + if op.count('Z') == n: + Operators_sorted.append(op) + idx_sorted.append(i) + Phases_sorted = Phases[:,idx_sorted] + # Fit linear curves to two body term + Two_body_phases = Phases_sorted[:,n_c-q_idx] + Single_body_phases = Phases_sorted[:,0] + from scipy.optimize import curve_fit + def func(x, A, B): + return A*x+B + popt, pcov = curve_fit(func, self.B_amps, Two_body_phases) + Opt_B = -popt[1]/popt[0] + # Fit single body phase + popt_0, pcov_0 = curve_fit(func, self.B_amps, Single_body_phases) + Phase_offset = func(Opt_B, *popt_0) + # Get Missing fraction relevant + Missing_fraction = self.raw_data_dict['data'][:,q_idx-n_c] + + self.proc_data_dict['Phases'] = Phases_sorted + self.proc_data_dict['Operators'] = Operators_sorted + self.proc_data_dict['Two_body_phases'] = Two_body_phases + self.proc_data_dict['Missing_fraction'] = Missing_fraction + self.proc_data_dict['Fit_res'] = popt + + self.qoi['Optimal_B'] = Opt_B + self.qoi['Phase_offset'] = Phase_offset + + def prepare_plots(self): + self.axs_dict = {} + fig = plt.figure(figsize=(10,4)) + axs = [fig.add_subplot(121), + fig.add_subplot(222), + fig.add_subplot(224)] + self.figs[f'Parity_check_calibration_{"_".join(self.Q_pair_target)}'] = fig + self.axs_dict['plot_1'] = axs[0] + # fig.patch.set_alpha(0) + self.plot_dicts[f'Parity_check_calibration_{"_".join(self.Q_pair_target)}']={ + 'plotfn': gate_calibration_plotfn, + 'ax_id': 'plot_1', + 'B_amps': self.B_amps, + 'Phases': self.proc_data_dict['Phases'], + 'Operators': self.proc_data_dict['Operators'], + 
'Q_control': self.Q_control, + 'Q_pair_target': self.Q_pair_target, + 'Two_body_phases': self.proc_data_dict['Two_body_phases'], + 'Missing_fraction': self.proc_data_dict['Missing_fraction'], + 'Fit_res': self.proc_data_dict['Fit_res'], + 'Opt_B': self.qoi['Optimal_B'], + 'timestamp': self.timestamps[0]} + + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', False): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + tag_tstamp=self.options_dict.get('tag_tstamp', True)) + +def gate_calibration_plotfn( + ax, + B_amps, + Phases, + Operators, + Q_control, + Q_pair_target, + Two_body_phases, + Missing_fraction, + Opt_B, + Fit_res, + timestamp, + **kw): + + fig = ax.get_figure() + axs = fig.get_axes() + + def func(x, A, B): + return A*x+B + from matplotlib.cm import viridis + Colors = [ viridis(x) for x in np.linspace(0, 1, len(B_amps)) ] + + for i, b_amp in enumerate(B_amps): + axs[0].plot(Phases[i], color=Colors[i], marker='o') + axs[0].grid(ls='--') + axs[0].set_xticks(np.arange(len(Operators))) + axs[0].set_xticklabels(Operators, rotation=90) + axs[0].set_xlabel(f'Operators (${"".join(["U_{"+s+"} " for s in Q_control[::1]])}$)') + axs[0].set_ylabel('Phase error (deg)') + + axs[1].plot(B_amps, func(B_amps, *Fit_res), 'C0--') + axs[1].plot(B_amps, Two_body_phases, 'C0o') + axs[1].axhline(0, ls='--', color='k', alpha=.5) + axs[1].axvline(Opt_B, ls='--', color='k', alpha=.5, label=f'Optimal B : {Opt_B:.3f}') + axs[1].legend(frameon=False) + axs[1].set_ylabel('Phase error (deg)') + axs[1].set_xticks(B_amps) + axs[1].set_xticklabels([]) + + axs[2].plot(B_amps, Missing_fraction*100, 'C2o-') + axs[2].set_xticks(B_amps) + axs[2].set_xlabel('B values') + axs[2].set_ylabel('Missing fraction (%)') + + fig.suptitle(f'{timestamp}\nParity check phase calibration gate {" ".join(Q_pair_target)}', y=1.01) + fig.tight_layout() 
+ + +class Parity_check_fidelity_analysis(ba.BaseDataAnalysis): + """ + Analysis + """ + def __init__(self, + Q_ancilla: str, + Q_control: list, + control_cases: list, + post_selection: bool, + t_start: str = None, + t_stop: str = None, + label: str = '', + options_dict: dict = None, + extract_only: bool = False, + auto=True): + super().__init__(t_start=t_start, + t_stop=t_stop, + label=label, + options_dict=options_dict, + extract_only=extract_only) + self.Q_ancilla = Q_ancilla + self.Q_control = Q_control + self.control_cases = control_cases + self.post_selection = post_selection + if auto: + self.run_analysis() + + def extract_data(self): + self.get_timestamps() + self.timestamp = self.timestamps[0] + data_fp = get_datafilepath_from_timestamp(self.timestamp) + qubit_list = [self.Q_ancilla] + self.Q_control + _data = {'data': ('Experimental Data/Data', 'dset'), + 'value_names': ('Experimental Data', 'attr:value_names')} + _thrs = {f'threshold_{q}': (f'Instrument settings/{q}', 'attr:ro_acq_threshold') + for q in qubit_list} + # param_spec = {**_data, **_thrs} + param_spec = {**_data} + self.raw_data_dict = h5d.extract_pars_from_datafile( + data_fp, param_spec) + # Parts added to be compatible with base analysis data requirements + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] + + def process_data(self): + self.proc_data_dict = {} + self.qoi = {} + # Process data + qubit_list = [self.Q_ancilla] + if self.post_selection: + qubit_list += self.Q_control + + n = len(self.Q_control)+1 + nr_cases = len(self.control_cases) + total_shots = len(self.raw_data_dict['data'][:,0]) + nr_shots_per_case = total_shots//((1+self.post_selection)*nr_cases+2**n) + Threshold = {} + RO_fidelity = {} + # Sort calibration shots and calculate threshold + Cal_shots = { q: {} for q in qubit_list } + states = ['0','1'] + if self.post_selection: + combinations = [''.join(s) for s in itertools.product(states, repeat=n)] + else: + 
combinations = ['0', '1'] + for i, q in enumerate(qubit_list): + for j, comb in enumerate(combinations): + if self.post_selection: + _shots = self.raw_data_dict['data'][:,i+1] + Cal_shots[q][comb] = _shots[2*nr_cases+j::2*nr_cases+2**n] + else: + _shots = self.raw_data_dict['data'][:,i+1] + Cal_shots[q][comb] = _shots[nr_cases+j::nr_cases+2] + shots_0 = [] + shots_1 = [] + for comb in combinations: + if comb[i] == '0': + shots_0 += list(Cal_shots[q][comb]) + else: + shots_1 += list(Cal_shots[q][comb]) + def _calculate_threshold(shots_0, shots_1): + s_max = np.max(list(shots_0)+list(shots_1)) + s_min = np.min(list(shots_0)+list(shots_1)) + s_0, bins_0 = np.histogram(shots_0, bins=100, range=(s_min, s_max)) + s_1, bins_1 = np.histogram(shots_1, bins=100, range=(s_min, s_max)) + bins = (bins_0[:-1]+bins_0[1:])/2 + th_idx = np.argmax(np.cumsum(s_0) - np.cumsum(s_1)) + threshold = bins[th_idx] + return threshold + Threshold[q] = _calculate_threshold(shots_0, shots_1) + RO_fidelity[q] = \ + (np.mean([1 if s < Threshold[q] else 0 for s in shots_0])+ + np.mean([0 if s < Threshold[q] else 1 for s in shots_1]))/2 + # Sort experiment shots + Shots_raw = {} + Shots_dig = { q: {} for q in qubit_list } + PS_mask = { case : np.ones(nr_shots_per_case) + for case in self.control_cases } + for i, q in enumerate(qubit_list): + shots_raw = self.raw_data_dict['data'][:,i+1] + shots_dig = np.array([ 0 if s 0: + # Choose positive voltage + Voltage[i] = max((flux_arc-detuning).roots) + else: + # Choose negative voltage + Voltage[i] = min((flux_arc-detuning).roots) + # Trace = Voltage/np.mean(Voltage[-6:]) + Trace = Voltage/np.mean(Voltage[-1:]) + # Fit exponential to trace + if self.update_IIR: + try: + p0 = [+.001, 400e-9, 1.0085] + popt, pcov = curve_fit(filter_func, Time[8:]*1e-9, Trace[8:], p0=p0) + except: + print_exception() + print('Fit failed. 
Trying new initial guess') + p0 = [-.01, 2e-6, 1.003] + # try: + popt, pcov = curve_fit(filter_func, Time[6:]*1e-9, Trace[6:], p0=p0) + print('Fit converged!') + # except: + # print_exception() + # popt=p0 + # print('Fit failed') + filtr = {'amp': popt[0], 'tau': popt[1]} + self.proc_data_dict['filter_pars'] = popt + self.proc_data_dict['exponential_filter'] = filtr + # Fit high pass to trace + if self.update_IIR_high_pass: + p0 = [1.8e-3, +2e-6] + popt, pcov = curve_fit(filter_func_high_pass, + Time[100:]*1e-9, Trace[100:], p0=p0) + filtr = {'tau': popt[0]} + self.proc_data_dict['filter_pars'] = p0#popt + self.proc_data_dict['high_pass_filter'] = filtr + # Save quantities for plot + self.proc_data_dict['Time'] = Time + self.proc_data_dict['Frequencies'] = Frequencies + self.proc_data_dict['Data'] = Data + self.proc_data_dict['Center_freqs'] = Center_freqs + self.proc_data_dict['Trace'] = Trace + + def prepare_plots(self): + self.axs_dict = {} + fig, ax = plt.subplots(figsize=(4,3), dpi=200) + # fig.patch.set_alpha(0) + self.axs_dict[f'Cryoscope_long'] = ax + self.figs[f'Cryoscope_long'] = fig + self.plot_dicts['Cryoscope_long'] = { + 'plotfn': Cryoscope_long_plotfn, + 'ax_id': 'Cryoscope_long', + 'Time': self.proc_data_dict['Time'], + 'Frequencies': self.proc_data_dict['Frequencies'], + 'Data': self.proc_data_dict['Data'], + 'Center_freqs': self.proc_data_dict['Center_freqs'], + 'Trace': self.proc_data_dict['Trace'], + 'qubit': self.qubit, + 'qubit_freq': self.frequency, + 'timestamp': self.timestamps[0], + 'filter_pars': self.proc_data_dict['filter_pars'] \ + if (self.update_IIR or self.update_IIR_high_pass) else None, + } + + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', False): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + tag_tstamp=self.options_dict.get('tag_tstamp', True)) + +def 
Cryoscope_long_plotfn(Time, + Frequencies, + Data, + Center_freqs, + Trace, + timestamp, + qubit, + qubit_freq, + filter_pars=None, + ax=None, **kw): + fig = ax.get_figure() + # Spectroscopy plot + if Time[-1] > 2000: + _Time = Time/1e3 + else: + _Time = Time + ax.pcolormesh(_Time, Frequencies*1e-9, Data, shading='nearest') + ax.plot(_Time, Center_freqs*1e-9, 'w-', lw=1) + axt = ax.twinx() + _lim = ax.get_ylim() + _lim = (qubit_freq*1e-9-np.array(_lim))*1e3 + axt.set_ylim(_lim) + axt.set_ylabel('Detuning (MHz)') + # ax.set_xlabel('Time (ns)') + ax.set_ylabel('Frequency (GHz)') + ax.set_title('Spectroscopy of step response') + # Cryoscope trace plot + ax1 = fig.add_subplot(111) + pos = ax1.get_position() + ax1.set_position([pos.x0+1.2, pos.y0, pos.width, pos.height]) + ax1.axhline(1, color='grey', ls='-') + ax1.axhline(1.005, color='grey', ls='--') + ax1.axhline(0.995, color='grey', ls='--') + ax1.axhline(1.001, color='grey', ls=':') + ax1.axhline(0.999, color='grey', ls=':') + if filter_pars is not None: + _x = np.linspace(Time[0], Time[-1], 201) + _x_ = np.linspace(_Time[0], _Time[-1], 201) + if len(filter_pars) == 2: # High pass compenstaion filter + tau = filter_pars[0]*1e6 + ax1.plot(_x_, filter_func_high_pass(_x*1e-9,*filter_pars), 'C1--', + label=f'IIR fit ($\\tau={tau:.1f}\\mu$s)') + else: # low-pass compensation filter + tau = filter_pars[1]*1e9 + ax1.plot(_x_, filter_func(_x*1e-9,*filter_pars), 'C1--', + label=f'IIR fit ($\\tau={tau:.0f}$ns)') + ax1.legend(frameon=False) + ax1.plot(_Time, Trace) + bottom, top = ax1.get_ylim() + bottom = min(.99, bottom) + top = max(1.01, top) + ax1.set_ylim(bottom, top) + ax1.set_xlim(_Time[0], _Time[-1]) + # ax1.set_xlabel('Time (ns)') + ax1.set_ylabel('Normalized amplitude') + ax1.set_title('Reconstructed step response') + if Time[-1] > 2000: + ax.set_xlabel('Time ($\\mu$s)') + ax1.set_xlabel('Time ($\\mu$s)') + else: + ax.set_xlabel('Time (ns)') + ax1.set_xlabel('Time (ns)') + # Fig title + 
fig.suptitle(f'{timestamp}\n{qubit} long time-scale cryoscope', x=1.1, y=1.15) diff --git a/pycqed/analysis_v2/dac_scan_analysis.py b/pycqed/analysis_v2/dac_scan_analysis.py index b50eae8cd2..f73f305869 100644 --- a/pycqed/analysis_v2/dac_scan_analysis.py +++ b/pycqed/analysis_v2/dac_scan_analysis.py @@ -671,20 +671,22 @@ class DAC_analysis(ma.TwoD_Analysis): obtained by -b/2a (parabola = ax^2 + bx +c) """ - def __init__(self, timestamp, + def __init__(self, options_dict=None, do_fitting=True, extract_only=False, auto=True, + use_phase:bool=False, + ignore_idxs: list=[0, -1], **kw): - super(ma.TwoD_Analysis, self).__init__(timestamp=timestamp, - options_dict=options_dict, + super(ma.TwoD_Analysis, self).__init__(options_dict=options_dict, extract_only=extract_only, auto=auto, do_fitting=do_fitting, **kw) - linecut_fit_result = self.fit_linecuts() + linecut_fit_result = self.fit_linecuts(use_phase) self.linecut_fit_result = linecut_fit_result + self.ignore_idxs = ignore_idxs f0s = [] for res in self.linecut_fit_result: f0s.append(res.values['f0']) @@ -694,9 +696,12 @@ def __init__(self, timestamp, self.sweet_spot_value = self.dac_fit_res['sweetspot_dac'] self.plot_fit_result(**kw) - def fit_linecuts(self): - linecut_mag = np.array(self.measured_values)[0].T - sweep_points = self.sweep_points + def fit_linecuts(self, use_phase): + if use_phase: + linecut_mag = np.array(self.measured_values)[1].T[:] + else: + linecut_mag = np.array(self.measured_values)[0].T[:] + sweep_points = self.sweep_points[:] fit_result = [] for linecut in linecut_mag: fit_result.append(self.qubit_fit(sweep_points, linecut)) @@ -709,8 +714,8 @@ def qubit_fit(self, sweep_points, linecut_mag, **kw): Does not support 2nd peak fitting, as it does not seem necessary. 
""" frequency_guess = kw.get('frequency_guess', None) - percentile = kw.get('percentile', 20) - num_sigma_threshold = kw.get('num_sigma_threshold', 5) + percentile = kw.get('percentile', 2.5) + num_sigma_threshold = kw.get('num_sigma_threshold', 4) window_len_filter = kw.get('window_len_filter', 3) optimize = kw.pop('optimize', True) verbose = kw.get('verbose', False) @@ -803,9 +808,15 @@ def qubit_fit(self, sweep_points, linecut_mag, **kw): return fit_res def fit_dac_arc(self): - DAC_values = self.sweep_points_2D - f0s = self.f0s - + DAC_values = list(self.sweep_points_2D) + f0s = list(self.f0s) + self.ignore_idxs = [x if x>0 else x%len(DAC_values) for x in self.ignore_idxs] + print(self.ignore_idxs) + # remove ignored indexes + DAC_values = [x for i, x in enumerate(DAC_values) if i not in self.ignore_idxs] + f0s = [x for i, x in enumerate(f0s) if i not in self.ignore_idxs] + # DAC_values = self.sweep_points_2D + # f0s = self.f0s polycoeffs = np.polyfit(DAC_values, f0s, 2) sweetspot_dac = -polycoeffs[1]/(2*polycoeffs[0]) fit_res = {} diff --git a/pycqed/analysis_v2/measurement_analysis.py b/pycqed/analysis_v2/measurement_analysis.py index 850483e8ab..f8ae8cc1dd 100644 --- a/pycqed/analysis_v2/measurement_analysis.py +++ b/pycqed/analysis_v2/measurement_analysis.py @@ -59,6 +59,8 @@ reload(pba) import pycqed.analysis_v2.Two_qubit_gate_analysis as tqg reload(tqg) +import pycqed.analysis_v2.GBT_analysis as gbta +reload(gbta) import pycqed.analysis_v2.fluxing_analysis as fla reload(fla) @@ -72,6 +74,12 @@ import pycqed.analysis_v2.cryoscope_v2_analysis as cv2 reload(cv2) +import pycqed.analysis_v2.LRU_analysis as lrua +reload(lrua) + +import pycqed.analysis_v2.tomography_analysis as tomoa +reload(tomoa) + from pycqed.analysis_v2.simple_analysis import ( Basic1DAnalysis, Basic1DBinnedAnalysis, Basic2DAnalysis, Basic2DInterpolatedAnalysis) @@ -79,7 +87,7 @@ FlippingAnalysis, EFRabiAnalysis, DecoherenceAnalysis, Intersect_Analysis, Oscillation_Analysis, 
ComplexRamseyAnalysis, Crossing_Analysis, Conditional_Oscillation_Analysis, Idling_Error_Rate_Analyisis, - Grovers_TwoQubitAllStates_Analysis) + Grovers_TwoQubitAllStates_Analysis, FineBiasAnalysis) from pycqed.analysis_v2.readout_analysis import ( Singleshot_Readout_Analysis, RO_acquisition_delayAnalysis, Dispersive_shift_Analysis, Readout_landspace_Analysis) @@ -98,6 +106,8 @@ from pycqed.analysis_v2.cryo_scope_analysis import RamZFluxArc, \ SlidingPulses_Analysis, Cryoscope_Analysis +import pycqed.analysis_v2.cryo_scope_analysis_V2 +reload(pycqed.analysis_v2.cryo_scope_analysis_V2) from pycqed.analysis_v2.cryo_scope_analysis_V2 import RamZFluxArc, \ SlidingPulses_Analysis, Cryoscope_Analysis diff --git a/pycqed/analysis_v2/multi_analysis.py b/pycqed/analysis_v2/multi_analysis.py index 31b71fce44..2932dac0ee 100644 --- a/pycqed/analysis_v2/multi_analysis.py +++ b/pycqed/analysis_v2/multi_analysis.py @@ -27,15 +27,13 @@ def __init__( extract_only: bool = False, close_figs=False, do_fitting: bool = False, - auto=True, - qubits: list = None + auto=True ): super().__init__( label=label, t_start = t_start, t_stop = t_stop ) - self.qubits = qubits if auto: self.run_analysis() @@ -44,8 +42,10 @@ def extract_data(self): self.timestamps = a_tools.get_timestamps_in_range(self.t_start,self.t_stop, label = self.labels) self.raw_data_dict['timestamps'] = self.timestamps data_fp = a_tools.get_datafilepath_from_timestamp(self.timestamps[0]) - param_spec = {'data': ('Experimental Data/Data', 'dset')} + param_spec = {'data': ('Experimental Data/Data', 'dset'), + 'value_names': ('Experimental Data', 'attr:value_names')} data = h5d.extract_pars_from_datafile(data_fp, param_spec) + self.qubits = [ name.decode().split(' ')[-1] for name in data['value_names']] self.raw_data_dict['points'] = data['data'][:,0] for i, q in enumerate(self.qubits): self.raw_data_dict['{}_data'.format(q)] = data['data'][:,i+1] diff --git a/pycqed/analysis_v2/multiplexed_readout_analysis.py 
b/pycqed/analysis_v2/multiplexed_readout_analysis.py index 3b4d02aa35..c6cda793f8 100644 --- a/pycqed/analysis_v2/multiplexed_readout_analysis.py +++ b/pycqed/analysis_v2/multiplexed_readout_analysis.py @@ -8,7 +8,10 @@ from collections import OrderedDict import pycqed.analysis_v2.base_analysis as ba from pycqed.analysis_v2.tools import matplotlib_utils as mpl_utils -from pycqed.analysis.analysis_toolbox import get_datafilepath_from_timestamp +from pycqed.analysis.analysis_toolbox import ( + get_datafilepath_from_timestamp, + rotate_and_normalize_data, +) from pycqed.analysis.tools.plotting import set_xlabel, set_ylabel, \ cmap_to_alpha, cmap_first_to_alpha import pycqed.analysis.tools.data_manipulation as dm_tools @@ -609,17 +612,18 @@ def prepare_plots(self): if self.q_target == None: # Run analysis for all qubits if self.post_selection is True: - self.plot_dicts['assignment_probability_matrix_post'] = { - 'plotfn': plot_assignment_prob_matrix, - 'assignment_prob_matrix': - self.proc_data_dict['Post_assignment_prob_matrix'], - 'combinations': self.proc_data_dict['combinations'], - 'valid_combinations': self.proc_data_dict['combinations'], - 'qubit_labels': qubit_labels, - 'plotsize': np.array(np.shape(\ - self.proc_data_dict['Post_assignment_prob_matrix'].T))*.8, - 'post_selection': True - } + if nr_qubits<5 : + self.plot_dicts['assignment_probability_matrix_post'] = { + 'plotfn': plot_assignment_prob_matrix, + 'assignment_prob_matrix': + self.proc_data_dict['Post_assignment_prob_matrix'], + 'combinations': self.proc_data_dict['combinations'], + 'valid_combinations': self.proc_data_dict['combinations'], + 'qubit_labels': qubit_labels, + 'plotsize': np.array(np.shape(\ + self.proc_data_dict['Post_assignment_prob_matrix'].T))*.8, + 'post_selection': True + } self.plot_dicts['cross_fid_matrix_post'] = { 'plotfn': plot_cross_fid_matrix, 'prob_matrix': @@ -631,16 +635,17 @@ def prepare_plots(self): self.proc_data_dict['Post_cross_fidelity_matrix'].T))*.8, 
'post_selection': True } - self.plot_dicts['assignment_probability_matrix'] = { - 'plotfn': plot_assignment_prob_matrix, - 'assignment_prob_matrix': - self.proc_data_dict['assignment_prob_matrix'], - 'combinations': self.proc_data_dict['combinations'], - 'valid_combinations': self.proc_data_dict['combinations'], - 'qubit_labels': qubit_labels, - 'plotsize': np.array(np.shape(\ - self.proc_data_dict['assignment_prob_matrix'].T))*.8 - } + if nr_qubits<5 : + self.plot_dicts['assignment_probability_matrix'] = { + 'plotfn': plot_assignment_prob_matrix, + 'assignment_prob_matrix': + self.proc_data_dict['assignment_prob_matrix'], + 'combinations': self.proc_data_dict['combinations'], + 'valid_combinations': self.proc_data_dict['combinations'], + 'qubit_labels': qubit_labels, + 'plotsize': np.array(np.shape(\ + self.proc_data_dict['assignment_prob_matrix'].T))*.8 + } self.plot_dicts['cross_fid_matrix'] = { 'plotfn': plot_cross_fid_matrix, 'prob_matrix': @@ -724,6 +729,7 @@ def prepare_plots(self): 'threshold': \ self.proc_data_dict['Post_PDF_data'][ch]['threshold_raw'], 'timestamp': self.timestamp, + 'legend':False, 'qoi': self.qoi[ch], 'post_selection': True } @@ -782,6 +788,7 @@ def prepare_plots(self): 'threshold': \ self.proc_data_dict['PDF_data'][ch]['threshold_raw'], 'timestamp': self.timestamp, + 'legend':False, 'qoi': self.qoi[ch] } @@ -862,6 +869,7 @@ def prepare_plots(self): 'threshold': \ self.proc_data_dict['Post_PDF_data'][q_target_ch]['threshold_raw'], 'timestamp': self.timestamp, + 'legend':False, 'qoi': self.qoi[q_target_ch], 'post_selection':True } @@ -935,6 +943,7 @@ def prepare_plots(self): 'threshold': \ self.proc_data_dict['PDF_data'][q_target_ch]['threshold_raw'], 'timestamp': self.timestamp, + 'legend':False, 'qoi': self.qoi[q_target_ch] } @@ -988,7 +997,7 @@ def extract_data(self): def process_data(self): - length = int(len(self.raw_data_dict['data'][:, 0])/2) + length = int(len(self.raw_data_dict['data'][:, 0])) self.proc_data_dict['Time_data'] = 
np.arange(length)/1.8e9 self.proc_data_dict['Channel_0_data'] = self.raw_data_dict['data'][:, 1][:length] self.proc_data_dict['Channel_1_data'] = self.raw_data_dict['data'][:, 2][:length] @@ -1475,6 +1484,7 @@ def run_post_extract(self): close_figs=self.options_dict.get('close_figs', True), tag_tstamp=self.options_dict.get('tag_tstamp', True)) + class RTE_analysis(ba.BaseDataAnalysis): """ """ @@ -1732,7 +1742,8 @@ def run_post_extract(self): close_figs=self.options_dict.get('close_figs', True), tag_tstamp=self.options_dict.get('tag_tstamp', True)) -class measurement_QND_analysis(ba.BaseDataAnalysis): + +class Readout_sweep_analysis(ba.BaseDataAnalysis): """ This analysis extracts measurement QND metrics For details on the procedure see: @@ -1740,6 +1751,8 @@ class measurement_QND_analysis(ba.BaseDataAnalysis): """ def __init__(self, qubit:str, + frequencies: list, + amplitudes: list, t_start: str = None, t_stop: str = None, label: str = '', @@ -1747,14 +1760,13 @@ def __init__(self, extract_only: bool = False, auto=True ): - super().__init__(t_start=t_start, t_stop=t_stop, label=label, options_dict=options_dict, extract_only=extract_only) - self.qubit = qubit - + self.frequencies = frequencies + self.amplitudes = amplitudes if auto: self.run_analysis() @@ -1765,68 +1777,93 @@ def extract_data(self): """ self.get_timestamps() self.timestamp = self.timestamps[0] - data_fp = get_datafilepath_from_timestamp(self.timestamp) param_spec = {'data': ('Experimental Data/Data', 'dset'), 'value_names': ('Experimental Data', 'attr:value_names')} - self.raw_data_dict = h5d.extract_pars_from_datafile( data_fp, param_spec) - # Parts added to be compatible with base analysis data requirements self.raw_data_dict['timestamps'] = self.timestamps self.raw_data_dict['folder'] = os.path.split(data_fp)[0] def process_data(self): - Cal_0, Cal_1 = (self.raw_data_dict['data'][3::5,1], self.raw_data_dict['data'][3::5,2]), (self.raw_data_dict['data'][4::5,1], 
self.raw_data_dict['data'][4::5,2]) - M1, M2, M3 = self.raw_data_dict['data'][0::5,1], self.raw_data_dict['data'][1::5,1], self.raw_data_dict['data'][2::5,1] - th = estimate_threshold(Cal_0[0], Cal_1[0]) - M1_dig = np.array([ 0 if m}$" - for i in range(num_cal_pnts) - ], - "marker": "d", - "line_kws": {"markersize": 14, "markeredgecolor": "white"}, - "do_legend": True, - # "legend_title": "Calibration points", - "legend_ncol": 3, - "linestyle": "", - } + # num_cal_pnts = len(pdd["cal_triangle"]) + # fig_id_RB_on_IQ = "rb_on_iq_{}".format(val_name_I) + # for ax_id in [fig_id_hex, fig_id_RB_on_IQ]: + # self.plot_dicts[ax_id + "_cal_pnts"] = { + # "plotfn": self.plot_line, + # "ax_id": ax_id, + # "xvals": pdd["cal_triangle"].T[0].reshape(num_cal_pnts, 1), + # "yvals": pdd["cal_triangle"].T[1].reshape(num_cal_pnts, 1), + # "setlabel": [ + # r"V$_{\left |" + str(i) + r"\right >}$" + # for i in range(num_cal_pnts) + # ], + # "marker": "d", + # "line_kws": {"markersize": 14, "markeredgecolor": "white"}, + # "do_legend": True, + # # "legend_title": "Calibration points", + # "legend_ncol": 3, + # "linestyle": "", + # } # define figure and axes here to have custom layout - self.figs[fig_id_RB_on_IQ], axs = plt.subplots( - ncols=2, figsize=(fs[0] * 2.0, fs[1]) - ) - self.figs[fig_id_RB_on_IQ].patch.set_alpha(0) - self.axs[fig_id_RB_on_IQ] = axs[0] - fig_id_RB_on_IQ_det = fig_id_RB_on_IQ + "_detailed" - self.axs[fig_id_RB_on_IQ_det] = axs[1] - axs[1].yaxis.set_label_position("right") - axs[1].yaxis.tick_right() - - close_triangle = list(range(num_cal_pnts)) + [0] - self.plot_dicts[fig_id_RB_on_IQ] = { - "ax_id": fig_id_RB_on_IQ, - "plotfn": self.plot_line, - "xvals": pdd["cal_triangle"].T[0][close_triangle], - "yvals": pdd["cal_triangle"].T[1][close_triangle], - "xlabel": val_names[self.rates_I_quad_ch_idx], - "xunit": rdd["value_units"][0], - "ylabel": val_names[self.rates_Q_quad_ch_idx], - "yunit": rdd["value_units"][1], - "title": rdd["timestamp_string"] - + "\n" - + 
rdd["measurementstring"] - + " hexbin plot", - "marker": "", - "color": "black", - "line_kws": {"linewidth": 1}, - "setlabel": "NONE", - } + # self.figs[fig_id_RB_on_IQ], axs = plt.subplots( + # ncols=2, figsize=(fs[0] * 2.0, fs[1]) + # ) + # self.figs[fig_id_RB_on_IQ].patch.set_alpha(0) + # self.axs[fig_id_RB_on_IQ] = axs[0] + # fig_id_RB_on_IQ_det = fig_id_RB_on_IQ + "_detailed" + # self.axs[fig_id_RB_on_IQ_det] = axs[1] + # axs[1].yaxis.set_label_position("right") + # axs[1].yaxis.tick_right() + + # close_triangle = list(range(num_cal_pnts)) + [0] + # self.plot_dicts[fig_id_RB_on_IQ] = { + # "ax_id": fig_id_RB_on_IQ, + # "plotfn": self.plot_line, + # "xvals": pdd["cal_triangle"].T[0][close_triangle], + # "yvals": pdd["cal_triangle"].T[1][close_triangle], + # "xlabel": val_names[self.rates_I_quad_ch_idx], + # "xunit": rdd["value_units"][0], + # "ylabel": val_names[self.rates_Q_quad_ch_idx], + # "yunit": rdd["value_units"][1], + # "title": rdd["timestamp_string"] + # + "\n" + # + rdd["measurementstring"] + # + " hexbin plot", + # "marker": "", + # "color": "black", + # "line_kws": {"linewidth": 1}, + # "setlabel": "NONE", + # } - self.plot_dicts[fig_id_RB_on_IQ_det] = { - "ax_id": fig_id_RB_on_IQ_det, - "plotfn": self.plot_line, - "xvals": pdd["cal_triangle"].T[0][:2], - "yvals": pdd["cal_triangle"].T[1][:2], - "xlabel": val_names[self.rates_I_quad_ch_idx], - "xunit": rdd["value_units"][0], - "ylabel": val_names[self.rates_Q_quad_ch_idx], - "yunit": rdd["value_units"][1], - "title": r"Detailed view", - "marker": "", - "color": "black", - "line_kws": {"linewidth": 1}, - "setlabel": "NONE", - } + # self.plot_dicts[fig_id_RB_on_IQ_det] = { + # "ax_id": fig_id_RB_on_IQ_det, + # "plotfn": self.plot_line, + # "xvals": pdd["cal_triangle"].T[0][:2], + # "yvals": pdd["cal_triangle"].T[1][:2], + # "xlabel": val_names[self.rates_I_quad_ch_idx], + # "xunit": rdd["value_units"][0], + # "ylabel": val_names[self.rates_Q_quad_ch_idx], + # "yunit": rdd["value_units"][1], + # 
"title": r"Detailed view", + # "marker": "", + # "color": "black", + # "line_kws": {"linewidth": 1}, + # "setlabel": "NONE", + # } val_name_Q = rdd["value_names"][self.rates_Q_quad_ch_idx] rb_SI = (pdd["SI"][val_name_I], pdd["SI"][val_name_Q]) @@ -571,40 +575,40 @@ def prepare_plots(self, fit_input_tag: str = None): cols = ["royalblue", "dodgerblue", "red", "salmon"] mks = [8, 4, 8, 4] - for ax_id, do_legend in zip( - [fig_id_RB_on_IQ, fig_id_RB_on_IQ_det], [True, False] - ): - for S, col, mk_size, ID, label in zip(sigs, cols, mks, ids, labels): - self.plot_dicts[ax_id + "_{}".format(ID)] = { - "plotfn": self.plot_line, - "ax_id": ax_id, - "xvals": S[0], - "yvals": S[1], - "setlabel": label, - "marker": "o", - "line_kws": {"markersize": mk_size}, - "color": col, - "do_legend": do_legend, - "legend_ncol": 3, - "linestyle": "", - } - - for idx in [self.rates_I_quad_ch_idx, self.rates_Q_quad_ch_idx]: - val_name = rdd["value_names"][idx] - self.plot_dicts["raw_RB_curve_data_{}".format(val_name)] = { - "plotfn": plot_raw_RB_curve, - "ncl": pdd["ncl"], - "SI": pdd["SI"][val_name], - "SX": pdd["SX"][val_name], - "V0": pdd["V0"][val_name], - "V1": pdd["V1"][val_name], - "V2": pdd["V2"][val_name], - "xlabel": "Number of Cliffords", - "xunit": "#", - "ylabel": val_name, - "yunit": pdd["value_units"][idx], - "title": pdd["timestamp_string"] + "\n" + pdd["measurementstring"], - } + # for ax_id, do_legend in zip( + # [fig_id_RB_on_IQ, fig_id_RB_on_IQ_det], [True, False] + # ): + # for S, col, mk_size, ID, label in zip(sigs, cols, mks, ids, labels): + # self.plot_dicts[ax_id + "_{}".format(ID)] = { + # "plotfn": self.plot_line, + # "ax_id": ax_id, + # "xvals": S[0], + # "yvals": S[1], + # "setlabel": label, + # "marker": "o", + # "line_kws": {"markersize": mk_size}, + # "color": col, + # "do_legend": do_legend, + # "legend_ncol": 3, + # "linestyle": "", + # } + + # for idx in [self.rates_I_quad_ch_idx, self.rates_Q_quad_ch_idx]: + # val_name = rdd["value_names"][idx] + # 
self.plot_dicts["raw_RB_curve_data_{}".format(val_name)] = { + # "plotfn": plot_raw_RB_curve, + # "ncl": pdd["ncl"], + # "SI": pdd["SI"][val_name], + # "SX": pdd["SX"][val_name], + # "V0": pdd["V0"][val_name], + # "V1": pdd["V1"][val_name], + # "V2": pdd["V2"][val_name], + # "xlabel": "Number of Cliffords", + # "xunit": "#", + # "ylabel": val_name, + # "yunit": pdd["value_units"][idx], + # "title": pdd["timestamp_string"] + "\n" + pdd["measurementstring"], + # } self.plot_dicts["rb_rate_eq_pops_{}".format(val_name_I)] = { "plotfn": plot_populations_RB_curve, diff --git a/pycqed/analysis_v2/readout_analysis.py b/pycqed/analysis_v2/readout_analysis.py index 01673f4fde..e86005640e 100644 --- a/pycqed/analysis_v2/readout_analysis.py +++ b/pycqed/analysis_v2/readout_analysis.py @@ -1,37 +1,51 @@ """ -File containing analyses for readout. -This includes - - readout discrimination analysis - - single shot readout analysis - - multiplexed readout analysis (to be updated!) - -Originally written by Adriaan, updated/rewritten by Rene May 2018 +File containing analyses for single qubit readout. 
""" import itertools from copy import deepcopy -from collections import OrderedDict +import os import matplotlib.pyplot as plt -from mpl_toolkits.axes_grid1 import make_axes_locatable import lmfit +from collections import OrderedDict import numpy as np -from scipy.optimize import minimize -import scipy.constants as spconst - import pycqed.analysis.fitting_models as fit_mods from pycqed.analysis.fitting_models import ro_gauss, ro_CDF, ro_CDF_discr, gaussian_2D, gauss_2D_guess, gaussianCDF, ro_double_gauss_guess import pycqed.analysis.analysis_toolbox as a_tools import pycqed.analysis_v2.base_analysis as ba import pycqed.analysis_v2.simple_analysis as sa +from scipy.optimize import minimize from pycqed.analysis.tools.plotting import SI_val_to_msg_str, \ set_xlabel, set_ylabel, set_cbarlabel, flex_colormesh_plot_vs_xy from pycqed.analysis_v2.tools.plotting import scatter_pnts_overlay +from mpl_toolkits.axes_grid1 import make_axes_locatable import pycqed.analysis.tools.data_manipulation as dm_tools from pycqed.utilities.general import int2base from pycqed.utilities.general import format_value_string +from pycqed.analysis.analysis_toolbox import get_datafilepath_from_timestamp +import pycqed.measurement.hdf5_data as h5d +import matplotlib.patches as patches +import matplotlib.pylab as pl +from pycqed.analysis.tools.plotting import cmap_to_alpha, cmap_first_to_alpha +from matplotlib.colors import LinearSegmentedColormap +import pathlib +from copy import copy, deepcopy +from typing import List +from itertools import repeat +from warnings import warn -class Singleshot_Readout_Analysis(ba.BaseDataAnalysis): +import xarray as xr +from scipy import optimize as opt + +from sklearn.discriminant_analysis import LinearDiscriminantAnalysis +from sklearn.metrics import mean_absolute_error, mean_squared_error + +from pycqed.utilities.general import int2base +from pycqed.utilities.general import format_value_string + +# This analysis is deprecated +class 
Singleshot_Readout_Analysis_old(ba.BaseDataAnalysis): def __init__(self, t_start: str=None, t_stop: str=None, label: str='', do_fitting: bool = True, @@ -773,15 +787,14 @@ def prepare_plots(self): fit_text += '\n\n(Single quadrature data)' fit_text += '\n\nTotal shots: %d+%d' % (*self.proc_data_dict['nr_shots'],) - if self.predict_qubit_temp: - h = spconst.value('Planck constant') - kb = spconst.value('Boltzmann constant') + h = 6.62607004e-34 + kb = 1.38064852e-23 res_exc = a_sp.value - T_eff = h*self.qubit_freq/(kb*np.log((1-res_exc)/res_exc)) - fit_text += '\n\nQubit $T_{eff}$' \ - + ' = {:.2f} mK\n @ {:.3f} GHz' \ - .format(T_eff*1e3, self.qubit_freq*1e-9) + effective_temp = h*6.42e9/(kb*np.log((1-res_exc)/res_exc)) + fit_text += '\n\nQubit '+'$T_{eff}$'+\ + ' = {:.2f} mK\n@{:.0f}'.format(effective_temp*1e3, + self.qubit_freq) for ax in ['cdf', '1D_histogram']: self.plot_dicts['text_msg_' + ax] = { @@ -793,6 +806,1098 @@ def prepare_plots(self): 'text_string': fit_text, } +def get_shots_zero_one(data, post_select: bool=False, + nr_samples: int=2, sample_0: int=0, sample_1: int=1, + post_select_threshold: float = None): + if not post_select: + shots_0, shots_1 = a_tools.zigzag( + data, sample_0, sample_1, nr_samples) + else: + presel_0, presel_1 = a_tools.zigzag( + data, sample_0, sample_1, nr_samples) + + shots_0, shots_1 = a_tools.zigzag( + data, sample_0+1, sample_1+1, nr_samples) + + if post_select: + post_select_shots_0 = data[0::nr_samples] + shots_0 = data[1::nr_samples] + + post_select_shots_1 = data[nr_samples//2::nr_samples] + shots_1 = data[nr_samples//2+1::nr_samples] + + # Determine shots to remove + post_select_indices_0 = dm_tools.get_post_select_indices( + thresholds=[post_select_threshold], + init_measurements=[post_select_shots_0]) + + post_select_indices_1 = dm_tools.get_post_select_indices( + thresholds=[post_select_threshold], + init_measurements=[post_select_shots_1]) + + shots_0[post_select_indices_0] = np.nan + shots_0 = 
shots_0[~np.isnan(shots_0)] + + shots_1[post_select_indices_1] = np.nan + shots_1 = shots_1[~np.isnan(shots_1)] + + return shots_0, shots_1 + +def plot_2D_ssro_histogram(xvals, yvals, zvals, xlabel, xunit, ylabel, yunit, zlabel, zunit, + xlim=None, ylim=None, + title='', + cmap='viridis', + cbarwidth='10%', + cbarpad='5%', + no_label=False, + ax=None, cax=None, **kw): + if ax is None: + f, ax = plt.subplots() + if not no_label: + ax.set_title(title) + + # Plotting the "heatmap" + out = flex_colormesh_plot_vs_xy(xvals, yvals, zvals, ax=ax, + plot_cbar=True, cmap=cmap) + # Adding the colorbar + if cax is None: + ax.ax_divider = make_axes_locatable(ax) + ax.cax = ax.ax_divider.append_axes( + 'right', size=cbarwidth, pad=cbarpad) + else: + ax.cax = cax + ax.cbar = plt.colorbar(out['cmap'], cax=ax.cax) + + # Setting axis limits aspect ratios and labels + ax.set_aspect(1) + set_xlabel(ax, xlabel, xunit) + set_ylabel(ax, ylabel, yunit) + set_cbarlabel(ax.cbar, zlabel, zunit) + if xlim is None: + xlim = np.min([xvals, yvals]), np.max([xvals, yvals]) + ax.set_xlim(xlim) + if ylim is None: + ylim = np.min([xvals, yvals]), np.max([xvals, yvals]) + ax.set_ylim(ylim) + + +def _decision_boundary_points(coefs, intercepts): + ''' + Find points along the decision boundaries of + LinearDiscriminantAnalysis (LDA). + This is performed by finding the interception + of the bounds of LDA. For LDA, these bounds are + encoded in the coef_ and intercept_ parameters + of the classifier. + Each bound is given by the equation: + y + coef_i[0]/coef_i[1]*x + intercept_i = 0 + Note this only works for LinearDiscriminantAnalysis. + Other classifiers might have diferent bound models. + ''' + points = {} + # Cycle through model coeficientsand intercepts. 
+ # 2-state classifier + if len(intercepts) == 1: + m = -coefs[0][0] / coefs[0][1] + _X = np.array([-10, 10]) + _Y = m*_X - intercepts[0]/coefs[0][1] + points['left'] = np.array([_X[0], _Y[0]]) + points['right'] = np.array([_X[1], _Y[1]]) + # 3-state classifier + elif len(intercepts) == 3: + for i, j in [[0,1], [1,2], [0,2]]: + c_i = coefs[i] + int_i = intercepts[i] + c_j = coefs[j] + int_j = intercepts[j] + x = (- int_j/c_j[1] + int_i/c_i[1])/(-c_i[0]/c_i[1] + c_j[0]/c_j[1]) + y = -c_i[0]/c_i[1]*x - int_i/c_i[1] + points[f'{i}{j}'] = (x, y) + # Find mean point + points['mean'] = np.mean([ [x, y] for (x, y) in points.values()], axis=0) + # 4-state classifier + elif len(intercepts) == 4: + for i, j in [[0,1], [1,2], [2,3], [3,0]]: + c_i = coefs[i] + int_i = intercepts[i] + c_j = coefs[j] + int_j = intercepts[j] + x = (- int_j/c_j[1] + int_i/c_i[1])/(-c_i[0]/c_i[1] + c_j[0]/c_j[1]) + y = -c_i[0]/c_i[1]*x - int_i/c_i[1] + points[f'{i}{j}'] = (x, y) + else: + raise ValueError('No valid data input.') + return points + +class Singleshot_Readout_Analysis(ba.BaseDataAnalysis): + """ + Analysis for single-shot readout experiment + updated in September 2022 (Jorge). + This new analysis now supports post-selection + with two quadratures and 3 state readout. + """ + def __init__(self, + qubit: str, + qubit_freq: float, + heralded_init: bool, + f_state: bool = False, + h_state: bool = False, + t_start: str = None, + t_stop: str = None, + label: str = '', + options_dict: dict = None, + extract_only: bool = False, + auto=True + ): + + super().__init__(t_start=t_start, t_stop=t_stop, + label=label, + options_dict=options_dict, + extract_only=extract_only) + + self.qubit = qubit + self.heralded_init = heralded_init + self.qubit_freq = qubit_freq + self.f_state = f_state + self.h_state = h_state + + if auto: + self.run_analysis() + + def extract_data(self): + """ + This is a new style (sept 2019) data extraction. + This could at some point move to a higher level class. 
+ """ + self.get_timestamps() + self.timestamp = self.timestamps[0] + data_fp = get_datafilepath_from_timestamp(self.timestamp) + param_spec = {'data': ('Experimental Data/Data', 'dset'), + 'value_names': ('Experimental Data', 'attr:value_names')} + self.raw_data_dict = h5d.extract_pars_from_datafile( + data_fp, param_spec) + # Parts added to be compatible with base analysis data requirements + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] + + def process_data(self): + # Perform measurement post-selection + _cycle = 2 + if self.f_state: + _cycle += 1 + if self.h_state: + _cycle += 1 + if self.heralded_init: + _cycle *= 2 + ############################################ + # Rotate shots when data in two quadratures + ############################################ + if self.raw_data_dict['data'].shape[1] == 3: + # Sort shots + _raw_shots = self.raw_data_dict['data'][:,1:] + if self.heralded_init: + _shots_0 = _raw_shots[1::_cycle] + _shots_1 = _raw_shots[3::_cycle] + if self.f_state: + _shots_2 = _raw_shots[5::_cycle] + self.proc_data_dict['shots_2_IQ'] = _shots_2 + if self.h_state: + _shots_3 = _raw_shots[7::_cycle] + self.proc_data_dict['shots_3_IQ'] = _shots_3 + else: + _shots_0 = _raw_shots[0::_cycle] + _shots_1 = _raw_shots[1::_cycle] + if self.f_state: + _shots_2 = _raw_shots[2::_cycle] + self.proc_data_dict['shots_2_IQ'] = _shots_2 + if self.h_state: + _shots_3 = _raw_shots[3::_cycle] + self.proc_data_dict['shots_3_IQ'] = _shots_3 + # Save raw shots + self.proc_data_dict['shots_0_IQ'] = _shots_0 + self.proc_data_dict['shots_1_IQ'] = _shots_1 + # Rotate data along 01 + center_0 = np.array([np.mean(_shots_0[:,0]), np.mean(_shots_0[:,1])]) + center_1 = np.array([np.mean(_shots_1[:,0]), np.mean(_shots_1[:,1])]) + def rotate_and_center_data(I, Q, vec0, vec1, phi=0): + vector = vec1-vec0 + angle = np.arctan(vector[1]/vector[0]) + rot_matrix = np.array([[ np.cos(-angle+phi),-np.sin(-angle+phi)], + [ 
np.sin(-angle+phi), np.cos(-angle+phi)]]) + proc = np.array((I, Q)) + proc = np.dot(rot_matrix, proc) + return proc.transpose() + raw_shots = rotate_and_center_data(_raw_shots[:,0], _raw_shots[:,1], center_0, center_1) + else: + # Remove shot number + raw_shots = self.raw_data_dict['data'][:,1:] + ##################################################### + # From this point onward raw shots has shape + # (nr_shots, nr_quadratures). + # Post select based on heralding measurement result. + ##################################################### + if self.heralded_init: + # estimate post-selection threshold + shots_0 = raw_shots[1::_cycle, 0] + shots_1 = raw_shots[3::_cycle, 0] + ps_th = (np.mean(shots_0)+np.mean(shots_1))/2 + # Sort heralding shots from experiment shots + ps_shots = raw_shots[0::2,0] # only I quadrature needed for postselection + exp_shots = raw_shots[1::2] # Here we want to keep both quadratures + # create post-selection mask + _mask = [ 1 if s contains post-selected + # shots of state and has shape (nr_ps_shots, nr_quadtrs). + # Next we will analyze shots projected along axis and + # therefore use a single quadrature. shots_ will be used + # to denote that array of shots. 
+ ############################################################## + # Analyse data in quadrature of interest + # (01 projection axis) + ############################################################## + shots_0 = Shots_0[:,0] + shots_1 = Shots_1[:,0] + # total number of shots (after postselection) + n_shots_0 = len(shots_0) + n_shots_1 = len(shots_1) + # find range + _all_shots = np.concatenate((shots_0, shots_1)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x0, n0 = np.unique(shots_0, return_counts=True) + x1, n1 = np.unique(shots_1, return_counts=True) + # Calculate fidelity and optimal threshold + def _calculate_fid_and_threshold(x0, n0, x1, n1): + """ + Calculate fidelity and threshold from histogram data: + x0, n0 is the histogram data of shots 0 (value and occurences), + x1, n1 is the histogram data of shots 1 (value and occurences). + """ + # Build cumulative histograms of shots 0 + # and 1 in common bins by interpolation. + all_x = np.unique(np.sort(np.concatenate((x0, x1)))) + cumsum0, cumsum1 = np.cumsum(n0), np.cumsum(n1) + ecumsum0 = np.interp(x=all_x, xp=x0, fp=cumsum0, left=0) + necumsum0 = ecumsum0/np.max(ecumsum0) + ecumsum1 = np.interp(x=all_x, xp=x1, fp=cumsum1, left=0) + necumsum1 = ecumsum1/np.max(ecumsum1) + # Calculate optimal threshold and fidelity + F_vs_th = (1-(1-abs(necumsum0 - necumsum1))/2) + opt_idxs = np.argwhere(F_vs_th == np.amax(F_vs_th)) + opt_idx = int(round(np.average(opt_idxs))) + F_assignment_raw = F_vs_th[opt_idx] + threshold_raw = all_x[opt_idx] + return F_assignment_raw, threshold_raw + Fid_raw, threshold_raw = _calculate_fid_and_threshold(x0, n0, x1, n1) + ###################### + # Fit data + ###################### + def _fit_double_gauss(x_vals, hist_0, hist_1): + ''' + Fit two histograms to a double gaussian with + common parameters. From fitted parameters, + calculate SNR, Pe0, Pg1, Teff, Ffit and Fdiscr. 
+ ''' + from scipy.optimize import curve_fit + # Double gaussian model for fitting + def _gauss_pdf(x, x0, sigma): + return np.exp(-((x-x0)/sigma)**2/2) + global double_gauss + def double_gauss(x, x0, x1, sigma0, sigma1, A, r): + _dist0 = A*( (1-r)*_gauss_pdf(x, x0, sigma0) + r*_gauss_pdf(x, x1, sigma1) ) + return _dist0 + # helper function to simultaneously fit both histograms with common parameters + def _double_gauss_joint(x, x0, x1, sigma0, sigma1, A0, A1, r0, r1): + _dist0 = double_gauss(x, x0, x1, sigma0, sigma1, A0, r0) + _dist1 = double_gauss(x, x1, x0, sigma1, sigma0, A1, r1) + return np.concatenate((_dist0, _dist1)) + # Guess for fit + pdf_0 = hist_0/np.sum(hist_0) # Get prob. distribution + pdf_1 = hist_1/np.sum(hist_1) # + _x0_guess = np.sum(x_vals*pdf_0) # calculate mean + _x1_guess = np.sum(x_vals*pdf_1) # + _sigma0_guess = np.sqrt(np.sum((x_vals-_x0_guess)**2*pdf_0)) # calculate std + _sigma1_guess = np.sqrt(np.sum((x_vals-_x1_guess)**2*pdf_1)) # + _r0_guess = 0.01 + _r1_guess = 0.05 + _A0_guess = np.max(hist_0) + _A1_guess = np.max(hist_1) + p0 = [_x0_guess, _x1_guess, _sigma0_guess, _sigma1_guess, _A0_guess, _A1_guess, _r0_guess, _r1_guess] + # Bounding parameters + _x0_bound = (-np.inf,np.inf) + _x1_bound = (-np.inf,np.inf) + _sigma0_bound = (0,np.inf) + _sigma1_bound = (0,np.inf) + _r0_bound = (0,1) + _r1_bound = (0,1) + _A0_bound = (0,np.inf) + _A1_bound = (0,np.inf) + bounds = np.array([_x0_bound, _x1_bound, _sigma0_bound, _sigma1_bound, _A0_bound, _A1_bound, _r0_bound, _r1_bound]) + # Fit parameters within bounds + popt, pcov = curve_fit( + _double_gauss_joint, bin_centers, + np.concatenate((hist_0, hist_1)), + p0=p0, bounds=bounds.transpose()) + popt0 = popt[[0,1,2,3,4,6]] + popt1 = popt[[1,0,3,2,5,7]] + # Calculate quantities of interest + SNR = abs(popt0[0] - popt1[0])/((abs(popt0[2])+abs(popt1[2]))/2) + P_e0 = popt0[5]*popt0[2]/(popt0[2]*popt0[5] + popt0[3]*(1-popt0[5])) + P_g1 = popt1[5]*popt1[2]/(popt1[2]*popt1[5] + 
popt1[3]*(1-popt1[5])) + # Effective qubit temperature + h = 6.62607004e-34 + kb = 1.38064852e-23 + T_eff = h*self.qubit_freq/(kb*np.log((1-P_e0)/P_e0)) + # Fidelity from fit + _x_data = np.linspace(*_range, 10001) + _h0 = double_gauss(_x_data, *popt0)# compute distrubition from + _h1 = double_gauss(_x_data, *popt1)# fitted parameters. + Fid_fit, threshold_fit = _calculate_fid_and_threshold(_x_data, _h0, _x_data, _h1) + # Discrimination fidelity + _h0 = double_gauss(_x_data, *popt0[:-1], 0)# compute distrubition without residual + _h1 = double_gauss(_x_data, *popt1[:-1], 0)# excitation of relaxation. + Fid_discr, threshold_discr = _calculate_fid_and_threshold(_x_data, _h0, _x_data, _h1) + # return results + qoi = { 'SNR': SNR, + 'P_e0': P_e0, 'P_g1': P_g1, + 'T_eff': T_eff, + 'Fid_fit': Fid_fit, 'Fid_discr': Fid_discr } + return popt0, popt1, qoi + # Histogram of shots for 0 and 1 + h0, bin_edges = np.histogram(shots_0, bins=100, range=_range) + h1, bin_edges = np.histogram(shots_1, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt0, popt1, params_01 = _fit_double_gauss(bin_centers, h0, h1) + # Save data in processed data dictionary + self.proc_data_dict['n_shots_0'] = n_shots_0 + self.proc_data_dict['n_shots_1'] = n_shots_1 + self.proc_data_dict['bin_centers'] = bin_centers + self.proc_data_dict['h0'] = h0 + self.proc_data_dict['h1'] = h1 + self.proc_data_dict['popt0'] = popt0 + self.proc_data_dict['popt1'] = popt1 + self.proc_data_dict['threshold_raw'] = threshold_raw + self.proc_data_dict['F_assignment_raw'] = Fid_raw + self.proc_data_dict['F_fit'] = params_01['Fid_fit'] + self.proc_data_dict['F_discr'] = params_01['Fid_discr'] + self.proc_data_dict['residual_excitation'] = params_01['P_e0'] + self.proc_data_dict['relaxation_events'] = params_01['P_g1'] + self.proc_data_dict['effective_temperature'] = params_01['T_eff'] + # Save quantities of interest + self.qoi = {} + self.qoi['SNR'] = params_01['SNR'] + self.qoi['F_a'] = Fid_raw + 
self.qoi['F_d'] = params_01['Fid_discr'] + self.proc_data_dict['quantities_of_interest'] = self.qoi + ############################################ + # If second state data is use classifier + # to assign states in the IQ plane and + # calculate qutrit fidelity. + ############################################ + if self.f_state: + # Parse data for classifier + data = np.concatenate((Shots_0, Shots_1, Shots_2)) + labels = [0 for s in Shots_0]+[1 for s in Shots_1]+[2 for s in Shots_2] + from sklearn.discriminant_analysis import LinearDiscriminantAnalysis + clf = LinearDiscriminantAnalysis() + clf.fit(data, labels) + dec_bounds = _decision_boundary_points(clf.coef_, clf.intercept_) + Fid_dict = {} + for state, shots in zip([ '0', '1', '2'], + [Shots_0, Shots_1, Shots_2]): + _res = clf.predict(shots) + _fid = np.mean(_res == int(state)) + Fid_dict[state] = _fid + Fid_dict['avg'] = np.mean([f for f in Fid_dict.values()]) + # Get assignment fidelity matrix + M = np.zeros((3,3)) + for i, shots in enumerate([Shots_0, Shots_1, Shots_2]): + for j, state in enumerate(['0', '1', '2']): + _res = clf.predict(shots) + M[i][j] = np.mean(_res == int(state)) + self.proc_data_dict['classifier'] = clf + self.proc_data_dict['dec_bounds'] = dec_bounds + self.proc_data_dict['Fid_dict'] = Fid_dict + self.qoi['Fid_dict'] = Fid_dict + self.qoi['Assignment_matrix'] = M + ######################################### + # Project data along axis perpendicular + # to the decision boundaries. + ######################################### + ############################ + # Projection along 10 axis. 
+ ############################ + # Rotate shots over 01 decision boundary axis + shots_0 = rotate_and_center_data(Shots_0[:,0],Shots_0[:,1], dec_bounds['mean'], dec_bounds['01'], phi=np.pi/2) + shots_1 = rotate_and_center_data(Shots_1[:,0],Shots_1[:,1], dec_bounds['mean'], dec_bounds['01'], phi=np.pi/2) + # Take relavant quadrature + shots_0 = shots_0[:,0] + shots_1 = shots_1[:,0] + n_shots_1 = len(shots_1) + # find range + _all_shots = np.concatenate((shots_0, shots_1)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x0, n0 = np.unique(shots_0, return_counts=True) + x1, n1 = np.unique(shots_1, return_counts=True) + Fid_01, threshold_01 = _calculate_fid_and_threshold(x0, n0, x1, n1) + # Histogram of shots for 1 and 2 + h0, bin_edges = np.histogram(shots_0, bins=100, range=_range) + h1, bin_edges = np.histogram(shots_1, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt0, popt1, params_01 = _fit_double_gauss(bin_centers, h0, h1) + # Save processed data + self.proc_data_dict['projection_01'] = {} + self.proc_data_dict['projection_01']['h0'] = h0 + self.proc_data_dict['projection_01']['h1'] = h1 + self.proc_data_dict['projection_01']['bin_centers'] = bin_centers + self.proc_data_dict['projection_01']['popt0'] = popt0 + self.proc_data_dict['projection_01']['popt1'] = popt1 + self.proc_data_dict['projection_01']['SNR'] = params_01['SNR'] + self.proc_data_dict['projection_01']['Fid'] = Fid_01 + self.proc_data_dict['projection_01']['threshold'] = threshold_01 + ############################ + # Projection along 12 axis. 
+ ############################ + # Rotate shots over 12 decision boundary axis + shots_1 = rotate_and_center_data(Shots_1[:,0],Shots_1[:,1],dec_bounds['mean'], dec_bounds['12'], phi=np.pi/2) + shots_2 = rotate_and_center_data(Shots_2[:,0],Shots_2[:,1],dec_bounds['mean'], dec_bounds['12'], phi=np.pi/2) + # Take relavant quadrature + shots_1 = shots_1[:,0] + shots_2 = shots_2[:,0] + n_shots_2 = len(shots_2) + # find range + _all_shots = np.concatenate((shots_1, shots_2)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x1, n1 = np.unique(shots_1, return_counts=True) + x2, n2 = np.unique(shots_2, return_counts=True) + Fid_12, threshold_12 = _calculate_fid_and_threshold(x1, n1, x2, n2) + # Histogram of shots for 1 and 2 + h1, bin_edges = np.histogram(shots_1, bins=100, range=_range) + h2, bin_edges = np.histogram(shots_2, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt1, popt2, params_12 = _fit_double_gauss(bin_centers, h1, h2) + # Save processed data + self.proc_data_dict['projection_12'] = {} + self.proc_data_dict['projection_12']['h1'] = h1 + self.proc_data_dict['projection_12']['h2'] = h2 + self.proc_data_dict['projection_12']['bin_centers'] = bin_centers + self.proc_data_dict['projection_12']['popt1'] = popt1 + self.proc_data_dict['projection_12']['popt2'] = popt2 + self.proc_data_dict['projection_12']['SNR'] = params_12['SNR'] + self.proc_data_dict['projection_12']['Fid'] = Fid_12 + self.proc_data_dict['projection_12']['threshold'] = threshold_12 + ############################ + # Projection along 02 axis. 
+ ############################ + # Rotate shots over 02 decision boundary axis + shots_0 = rotate_and_center_data(Shots_0[:,0],Shots_0[:,1],dec_bounds['mean'],dec_bounds['02'], phi=np.pi/2) + shots_2 = rotate_and_center_data(Shots_2[:,0],Shots_2[:,1],dec_bounds['mean'],dec_bounds['02'], phi=np.pi/2) + # Take relavant quadrature + shots_0 = shots_0[:,0] + shots_2 = shots_2[:,0] + n_shots_2 = len(shots_2) + # find range + _all_shots = np.concatenate((shots_0, shots_2)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x0, n0 = np.unique(shots_0, return_counts=True) + x2, n2 = np.unique(shots_2, return_counts=True) + Fid_02, threshold_02 = _calculate_fid_and_threshold(x0, n0, x2, n2) + # Histogram of shots for 1 and 2 + h0, bin_edges = np.histogram(shots_0, bins=100, range=_range) + h2, bin_edges = np.histogram(shots_2, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt0, popt2, params_02 = _fit_double_gauss(bin_centers, h0, h2) + # Save processed data + self.proc_data_dict['projection_02'] = {} + self.proc_data_dict['projection_02']['h0'] = h0 + self.proc_data_dict['projection_02']['h2'] = h2 + self.proc_data_dict['projection_02']['bin_centers'] = bin_centers + self.proc_data_dict['projection_02']['popt0'] = popt0 + self.proc_data_dict['projection_02']['popt2'] = popt2 + self.proc_data_dict['projection_02']['SNR'] = params_02['SNR'] + self.proc_data_dict['projection_02']['Fid'] = Fid_02 + self.proc_data_dict['projection_02']['threshold'] = threshold_02 + + if self.h_state: + # Parse data for classifier + data = np.concatenate((Shots_0, Shots_1, Shots_2, Shots_3)) + labels = [0 for s in Shots_0]+[1 for s in Shots_1]+\ + [2 for s in Shots_2]+[3 for s in Shots_3] + from sklearn.discriminant_analysis import LinearDiscriminantAnalysis + clf = LinearDiscriminantAnalysis() + clf.fit(data, labels) + dec_bounds = _decision_boundary_points(clf.coef_, clf.intercept_) + # dec_bounds = 
_decision_boundary_points(clf.coef_, clf.intercept_) + Fid_dict = {} + for state, shots in zip([ '0', '1', '2', '3'], + [Shots_0, Shots_1, Shots_2, Shots_3]): + _res = clf.predict(shots) + _fid = np.mean(_res == int(state)) + Fid_dict[state] = _fid + Fid_dict['avg'] = np.mean([f for f in Fid_dict.values()]) + # Get assignment fidelity matrix + M = np.zeros((4,4)) + for i, shots in enumerate([Shots_0, Shots_1, Shots_2, Shots_3]): + for j, state in enumerate(['0', '1', '2', '3']): + _res = clf.predict(shots) + M[i][j] = np.mean(_res == int(state)) + self.proc_data_dict['h_classifier'] = clf + self.proc_data_dict['h_dec_bounds'] = dec_bounds + self.proc_data_dict['h_Fid_dict'] = Fid_dict + self.qoi['h_Fid_dict'] = Fid_dict + self.qoi['h_Assignment_matrix'] = M + + def prepare_plots(self): + self.axs_dict = {} + fig, ax = plt.subplots(figsize=(5,4), dpi=100) + # fig.patch.set_alpha(0) + self.axs_dict['main'] = ax + self.figs['main'] = fig + self.plot_dicts['main'] = { + 'plotfn': ssro_hist_plotfn, + 'ax_id': 'main', + 'bin_centers': self.proc_data_dict['bin_centers'], + 'h0': self.proc_data_dict['h0'], + 'h1': self.proc_data_dict['h1'], + 'popt0': self.proc_data_dict['popt0'], + 'popt1': self.proc_data_dict['popt1'], + 'threshold': self.proc_data_dict['threshold_raw'], + 'Fid_raw': self.qoi['F_a'], + 'Fid_fit': self.proc_data_dict['F_fit'], + 'Fid_disc': self.qoi['F_d'], + 'SNR': self.qoi['SNR'], + 'P_e0': self.proc_data_dict['residual_excitation'], + 'P_g1': self.proc_data_dict['relaxation_events'], + 'n_shots_0': self.proc_data_dict['n_shots_0'], + 'n_shots_1': self.proc_data_dict['n_shots_1'], + 'T_eff': self.proc_data_dict['effective_temperature'], + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + if self.raw_data_dict['data'].shape[1] == 3: + fig, ax = plt.subplots(figsize=(4,4), dpi=100) + # fig.patch.set_alpha(0) + self.axs_dict['main2'] = ax + self.figs['main2'] = fig + self.plot_dicts['main2'] = { + 'plotfn': ssro_IQ_plotfn, + 'ax_id': 'main2', + 
'shots_0': self.proc_data_dict['shots_0_IQ'], + 'shots_1': self.proc_data_dict['shots_1_IQ'], + 'shots_2': self.proc_data_dict['shots_2_IQ'] if self.f_state else None, + 'shots_3': self.proc_data_dict['shots_3_IQ'] if self.h_state else None, + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + if self.f_state: + fig = plt.figure(figsize=(8,4), dpi=100) + axs = [fig.add_subplot(121), + fig.add_subplot(322), + fig.add_subplot(324), + fig.add_subplot(326)] + # fig.patch.set_alpha(0) + self.axs_dict['main3'] = axs[0] + self.figs['main3'] = fig + self.plot_dicts['main3'] = { + 'plotfn': ssro_IQ_projection_plotfn, + 'ax_id': 'main3', + 'shots_0': self.proc_data_dict['Shots_0'], + 'shots_1': self.proc_data_dict['Shots_1'], + 'shots_2': self.proc_data_dict['Shots_2'], + 'projection_01': self.proc_data_dict['projection_01'], + 'projection_12': self.proc_data_dict['projection_12'], + 'projection_02': self.proc_data_dict['projection_02'], + 'classifier': self.proc_data_dict['classifier'], + 'dec_bounds': self.proc_data_dict['dec_bounds'], + 'Fid_dict': self.proc_data_dict['Fid_dict'], + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + fig, ax = plt.subplots(figsize=(3,3), dpi=100) + # fig.patch.set_alpha(0) + self.axs_dict['Assignment_matrix'] = ax + self.figs['Assignment_matrix'] = fig + self.plot_dicts['Assignment_matrix'] = { + 'plotfn': assignment_matrix_plotfn, + 'ax_id': 'Assignment_matrix', + 'M': self.qoi['Assignment_matrix'], + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + if self.h_state: + fig, ax = plt.subplots(figsize=(3,3), dpi=100) + # fig.patch.set_alpha(0) + self.axs_dict['Assignment_matrix_h'] = ax + self.figs['Assignment_matrix_h'] = fig + self.plot_dicts['Assignment_matrix_h'] = { + 'plotfn': assignment_matrix_plotfn, + 'ax_id': 'Assignment_matrix_h', + 'M': self.qoi['h_Assignment_matrix'], + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + fig, ax = plt.subplots(figsize=(4,4), dpi=100) + # fig.patch.set_alpha(0) + 
self.axs_dict['main4'] = ax + self.figs['main4'] = fig + self.plot_dicts['main4'] = { + 'plotfn': ssro_IQ_plotfn, + 'ax_id': 'main4', + 'shots_0': self.proc_data_dict['Shots_0'], + 'shots_1': self.proc_data_dict['Shots_1'], + 'shots_2': self.proc_data_dict['Shots_2'], + 'shots_3': self.proc_data_dict['Shots_3'], + 'qubit': self.qubit, + 'timestamp': self.timestamp, + 'dec_bounds': self.proc_data_dict['h_dec_bounds'], + 'Fid_dict': self.proc_data_dict['h_Fid_dict'], + } + + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', False): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + tag_tstamp=self.options_dict.get('tag_tstamp', True)) + +def ssro_hist_plotfn( + bin_centers, + h0, h1, + popt0, popt1, + threshold, + qubit, + timestamp, + Fid_raw, + Fid_fit, + Fid_disc, + SNR, + P_e0, P_g1, + n_shots_0, + n_shots_1, + ax, + T_eff=None, + **kw): + fig = ax.get_figure() + bin_width = bin_centers[1]-bin_centers[0] + ax.bar(bin_centers, h0, bin_width, fc='C0', alpha=0.4) + ax.bar(bin_centers, h1, bin_width, fc='C3', alpha=0.4) + ax.plot(bin_centers, double_gauss(bin_centers, *popt0), '-C0', label='ground state') + ax.plot(bin_centers, double_gauss(bin_centers, *popt1), '-C3', label='excited state') + ax.axvline(threshold, ls='--', color='k', label='threshold') + # Write results + text = '\n'.join(('Fidelity and fit results:', + rf'$\mathrm{"{F_{assign}}"}:\:\:\:{Fid_raw*100:.2f}$%', + rf'$\mathrm{"{F_{fit}}"}:\:\:\:\:\:\:\:\:\:\:{Fid_fit*100:.2f}$%', + rf'$\mathrm{"{F_{discr}}"}:\:\:\:\:\:\:{Fid_disc*100:.2f}$%', + rf'$\mathrm{"{SNR}"}:\:\:\:\:\:\:\:{SNR:.2f}$', + '', + 'Spurious events:', + rf'$P(e|0)={P_e0*100:.2f}$%', + rf'$P(g|\pi)={P_g1*100:.2f}$%', + '', + 'Number of shots:', + f'$0$: {n_shots_0}\t$\pi$: {n_shots_1}', + '')) + if T_eff: + text += '\nEffective temperature:\n'+\ + f'$T_{"{qubit}"}$ : 
def ssro_IQ_plotfn(
    shots_0,
    shots_1,
    shots_2,
    shots_3,
    timestamp,
    qubit,
    ax,
    dec_bounds=None,
    Fid_dict=None,
    **kw):
    """
    Plot single-shot readout results in the IQ plane.

    Scatter-plots the shots of each prepared state, fits a rotated 2D
    Gaussian to each cloud (via a 100x100 2D histogram + curve_fit) and
    marks the fitted mean with a white cross and a 4-sigma ellipse.

    Args:
        shots_0, shots_1: (n_shots, 2) arrays of I/Q shots for the
            ground and first-excited state.
        shots_2, shots_3: optional (n_shots, 2) arrays for the 2nd and
            3rd excited states; pass None to skip them.
        timestamp, qubit: used only in the figure title.
        ax: matplotlib axis to draw on.
        dec_bounds: optional dict of classifier decision-boundary points.
            NOTE(review): the zone-drawing code below indexes keys
            '01', '12', '23', '30' and 'mean', i.e. it assumes the
            4-state boundary dict — confirm callers only pass that one.
        Fid_dict: optional dict of assignment fidelities; keys '0'..'3'
            and 'avg' are all indexed unconditionally.
        **kw: ignored; absorbs unused plot-dict entries.
    """
    fig = ax.get_figure()  # NOTE(review): fig is assigned but unused here
    # Fit 2D gaussians
    from scipy.optimize import curve_fit
    def twoD_Gaussian(data, amplitude, x0, y0, sigma_x, sigma_y, theta):
        # Rotated 2D Gaussian surface, raveled so curve_fit can use it.
        x, y = data
        x0 = float(x0)
        y0 = float(y0)
        a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2)
        b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
        c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2)
        g = amplitude*np.exp( - (a*((x-x0)**2) + 2*b*(x-x0)*(y-y0)
                                 + c*((y-y0)**2)))
        return g.ravel()
    def _fit_2D_gaussian(X, Y):
        # Histogram the shots and fit the 2D Gaussian to the bin counts.
        counts, _x, _y = np.histogram2d(X, Y, bins=[100, 100], density=True)
        x = (_x[:-1] + _x[1:]) / 2
        y = (_y[:-1] + _y[1:]) / 2
        _x, _y = np.meshgrid(_x, _y)
        x, y = np.meshgrid(x, y)
        p0 = [counts.max(), np.mean(X), np.mean(Y), np.std(X), np.std(Y), 0]
        # counts is transposed so its raveled order matches meshgrid(x, y).
        popt, pcov = curve_fit(twoD_Gaussian, (x, y), counts.T.ravel(), p0=p0)
        return popt
    popt_0 = _fit_2D_gaussian(shots_0[:,0], shots_0[:,1])
    popt_1 = _fit_2D_gaussian(shots_1[:,0], shots_1[:,1])
    # Plot stuff
    ax.plot(shots_0[:,0], shots_0[:,1], '.', color='C0', alpha=0.05)
    ax.plot(shots_1[:,0], shots_1[:,1], '.', color='C3', alpha=0.05)
    # Dashed lines from the origin to each fitted cloud center.
    ax.plot([0, popt_0[1]], [0, popt_0[2]], '--', color='k', lw=.5)
    ax.plot([0, popt_1[1]], [0, popt_1[2]], '--', color='k', lw=.5)
    ax.plot(popt_0[1], popt_0[2], '.', color='C0', label='ground')
    ax.plot(popt_1[1], popt_1[2], '.', color='C3', label='excited')
    ax.plot(popt_0[1], popt_0[2], 'x', color='white')
    ax.plot(popt_1[1], popt_1[2], 'x', color='white')
    # Draw 4sigma ellipse around mean
    from matplotlib.patches import Ellipse
    circle_0 = Ellipse((popt_0[1], popt_0[2]),
                       width=4*popt_0[3], height=4*popt_0[4],
                       angle=-popt_0[5]*180/np.pi,
                       ec='white', fc='none', ls='--', lw=1.25, zorder=10)
    ax.add_patch(circle_0)
    circle_1 = Ellipse((popt_1[1], popt_1[2]),
                       width=4*popt_1[3], height=4*popt_1[4],
                       angle=-popt_1[5]*180/np.pi,
                       ec='white', fc='none', ls='--', lw=1.25, zorder=10)
    ax.add_patch(circle_1)
    # _all_shots accumulates every plotted cloud to set symmetric limits.
    _all_shots = np.concatenate((shots_0, shots_1))
    if type(shots_2) != type(None):
        popt_2 = _fit_2D_gaussian(shots_2[:,0], shots_2[:,1])
        ax.plot(shots_2[:,0], shots_2[:,1], '.', color='C2', alpha=0.05)
        ax.plot([0, popt_2[1]], [0, popt_2[2]], '--', color='k', lw=.5)
        ax.plot(popt_2[1], popt_2[2], '.', color='C2', label='$2^\mathrm{nd}$ excited')
        ax.plot(popt_2[1], popt_2[2], 'x', color='white')
        # Draw 4sigma ellipse around mean
        circle_2 = Ellipse((popt_2[1], popt_2[2]),
                           width=4*popt_2[3], height=4*popt_2[4],
                           angle=-popt_2[5]*180/np.pi,
                           ec='white', fc='none', ls='--', lw=1.25, zorder=10)
        ax.add_patch(circle_2)
        _all_shots = np.concatenate((_all_shots, shots_2))
    if type(shots_3) != type(None):
        popt_3 = _fit_2D_gaussian(shots_3[:,0], shots_3[:,1])
        ax.plot(shots_3[:,0], shots_3[:,1], '.', color='gold', alpha=0.05)
        ax.plot([0, popt_3[1]], [0, popt_3[2]], '--', color='k', lw=.5)
        ax.plot(popt_3[1], popt_3[2], '.', color='gold', label='$3^\mathrm{rd}$ excited')
        ax.plot(popt_3[1], popt_3[2], 'x', color='white')
        # Draw 4sigma ellipse around mean
        circle_3 = Ellipse((popt_3[1], popt_3[2]),
                           width=4*popt_3[3], height=4*popt_3[4],
                           angle=-popt_3[5]*180/np.pi,
                           ec='white', fc='none', ls='--', lw=1.25, zorder=10)
        ax.add_patch(circle_3)
        _all_shots = np.concatenate((_all_shots, shots_3))

    # Symmetric axis limits with 10% margin around the largest |I| or |Q|.
    _lim = np.max([ np.max(np.abs(_all_shots[:,0]))*1.1,
                    np.max(np.abs(_all_shots[:,1]))*1.1 ])
    ax.set_xlim(-_lim, _lim)
    ax.set_ylim(-_lim, _lim)
    ax.legend(frameon=False)
    ax.set_xlabel('Integrated voltage I')
    ax.set_ylabel('Integrated voltage Q')
    ax.set_title(f'{timestamp}\nIQ plot qubit {qubit}')
    if dec_bounds:
        # Plot decision boundary
        _bounds = list(dec_bounds.keys())
        _bounds.remove('mean')
        Lim_points = {}
        for bound in _bounds:
            dec_bounds['mean']  # NOTE(review): bare lookup has no effect
            _x0, _y0 = dec_bounds['mean']
            _x1, _y1 = dec_bounds[bound]
            # Extend each boundary ray far past the data (|x| = 100)
            # so the zone polygons cover the whole visible axis.
            a = (_y1-_y0)/(_x1-_x0)
            b = _y0 - a*_x0
            _xlim = 1e2*np.sign(_x1-_x0)
            _ylim = a*_xlim + b
            Lim_points[bound] = _xlim, _ylim
        # Plot classifier zones
        from matplotlib.patches import Polygon
        # Plot 0 area
        _points = [dec_bounds['mean'], Lim_points['01'], Lim_points['30']]
        _patch = Polygon(_points, color='C0', alpha=0.2, lw=0)
        ax.add_patch(_patch)
        # Plot 1 area
        _points = [dec_bounds['mean'], Lim_points['01'], Lim_points['12']]
        _patch = Polygon(_points, color='C3', alpha=0.2, lw=0)
        ax.add_patch(_patch)
        # Plot 2 area
        _points = [dec_bounds['mean'], Lim_points['23'], Lim_points['12']]
        _patch = Polygon(_points, color='C2', alpha=0.2, lw=0)
        ax.add_patch(_patch)
        if type(shots_3) != type(None):
            # Plot 3 area
            _points = [dec_bounds['mean'], Lim_points['23'], Lim_points['30']]
            _patch = Polygon(_points, color='gold', alpha=0.2, lw=0)
            ax.add_patch(_patch)
        # Dashed lines along each boundary ray.
        for bound in _bounds:
            _x0, _y0 = dec_bounds['mean']
            _x1, _y1 = Lim_points[bound]
            ax.plot([_x0, _x1], [_y0, _y1], 'k--', lw=1)
    if Fid_dict:
        # Write fidelity textbox
        text = '\n'.join(('Assignment fidelity:',
                          f'$F_g$ : {Fid_dict["0"]*100:.1f}%',
                          f'$F_e$ : {Fid_dict["1"]*100:.1f}%',
                          f'$F_f$ : {Fid_dict["2"]*100:.1f}%',
                          f'$F_h$ : {Fid_dict["3"]*100:.1f}%',
                          f'$F_\mathrm{"{avg}"}$ : {Fid_dict["avg"]*100:.1f}%'))
        props = dict(boxstyle='round', facecolor='white', alpha=1)
        ax.text(1.05, 1, text, transform=ax.transAxes,
                verticalalignment='top', bbox=props)
shots_0, + shots_1, + shots_2, + projection_01, + projection_12, + projection_02, + classifier, + dec_bounds, + Fid_dict, + timestamp, + qubit, + ax, **kw): + fig = ax.get_figure() + axs = fig.get_axes() + # Fit 2D gaussians + from scipy.optimize import curve_fit + def twoD_Gaussian(data, amplitude, x0, y0, sigma_x, sigma_y, theta): + x, y = data + x0 = float(x0) + y0 = float(y0) + a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2) + b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2) + c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2) + g = amplitude*np.exp( - (a*((x-x0)**2) + 2*b*(x-x0)*(y-y0) + + c*((y-y0)**2))) + return g.ravel() + def _fit_2D_gaussian(X, Y): + counts, _x, _y = np.histogram2d(X, Y, bins=[100, 100], density=True) + x = (_x[:-1] + _x[1:]) / 2 + y = (_y[:-1] + _y[1:]) / 2 + _x, _y = np.meshgrid(_x, _y) + x, y = np.meshgrid(x, y) + p0 = [counts.max(), np.mean(X), np.mean(Y), np.std(X), np.std(Y), 0] + popt, pcov = curve_fit(twoD_Gaussian, (x, y), counts.T.ravel(), p0=p0) + return popt + popt_0 = _fit_2D_gaussian(shots_0[:,0], shots_0[:,1]) + popt_1 = _fit_2D_gaussian(shots_1[:,0], shots_1[:,1]) + popt_2 = _fit_2D_gaussian(shots_2[:,0], shots_2[:,1]) + # Plot stuff + axs[0].plot(shots_0[:,0], shots_0[:,1], '.', color='C0', alpha=0.05) + axs[0].plot(shots_1[:,0], shots_1[:,1], '.', color='C3', alpha=0.05) + axs[0].plot(shots_2[:,0], shots_2[:,1], '.', color='C2', alpha=0.05) + axs[0].plot([0, popt_0[1]], [0, popt_0[2]], '--', color='k', lw=.5) + axs[0].plot([0, popt_1[1]], [0, popt_1[2]], '--', color='k', lw=.5) + axs[0].plot([0, popt_2[1]], [0, popt_2[2]], '--', color='k', lw=.5) + axs[0].plot(popt_0[1], popt_0[2], '.', color='C0', label='ground') + axs[0].plot(popt_1[1], popt_1[2], '.', color='C3', label='excited') + axs[0].plot(popt_2[1], popt_2[2], '.', color='C2', label='$2^\mathrm{nd}$ excited') + axs[0].plot(popt_0[1], popt_0[2], 'x', color='white') + axs[0].plot(popt_1[1], 
popt_1[2], 'x', color='white') + axs[0].plot(popt_2[1], popt_2[2], 'x', color='white') + # Draw 4sigma ellipse around mean + from matplotlib.patches import Ellipse + circle_0 = Ellipse((popt_0[1], popt_0[2]), + width=4*popt_0[3], height=4*popt_0[4], + angle=-popt_0[5]*180/np.pi, + ec='white', fc='none', ls='--', lw=1.25, zorder=10) + axs[0].add_patch(circle_0) + circle_1 = Ellipse((popt_1[1], popt_1[2]), + width=4*popt_1[3], height=4*popt_1[4], + angle=-popt_1[5]*180/np.pi, + ec='white', fc='none', ls='--', lw=1.25, zorder=10) + axs[0].add_patch(circle_1) + circle_2 = Ellipse((popt_2[1], popt_2[2]), + width=4*popt_2[3], height=4*popt_2[4], + angle=-popt_2[5]*180/np.pi, + ec='white', fc='none', ls='--', lw=1.25, zorder=10) + axs[0].add_patch(circle_2) + # Plot classifier zones + from matplotlib.patches import Polygon + _all_shots = np.concatenate((shots_0, shots_1, shots_2)) + _lim = np.max([ np.max(np.abs(_all_shots[:,0]))*1.1, np.max(np.abs(_all_shots[:,1]))*1.1 ]) + Lim_points = {} + for bound in ['01', '12', '02']: + dec_bounds['mean'] + _x0, _y0 = dec_bounds['mean'] + _x1, _y1 = dec_bounds[bound] + a = (_y1-_y0)/(_x1-_x0) + b = _y0 - a*_x0 + _xlim = 1e2*np.sign(_x1-_x0) + _ylim = a*_xlim + b + Lim_points[bound] = _xlim, _ylim + # Plot 0 area + _points = [dec_bounds['mean'], Lim_points['01'], Lim_points['02']] + _patch = Polygon(_points, color='C0', alpha=0.2, lw=0) + axs[0].add_patch(_patch) + # Plot 1 area + _points = [dec_bounds['mean'], Lim_points['01'], Lim_points['12']] + _patch = Polygon(_points, color='C3', alpha=0.2, lw=0) + axs[0].add_patch(_patch) + # Plot 2 area + _points = [dec_bounds['mean'], Lim_points['02'], Lim_points['12']] + _patch = Polygon(_points, color='C2', alpha=0.2, lw=0) + axs[0].add_patch(_patch) + # Plot decision boundary + for bound in ['01', '12', '02']: + _x0, _y0 = dec_bounds['mean'] + _x1, _y1 = Lim_points[bound] + axs[0].plot([_x0, _x1], [_y0, _y1], 'k--', lw=1) + axs[0].set_xlim(-_lim, _lim) + axs[0].set_ylim(-_lim, _lim) + 
axs[0].legend(frameon=False) + axs[0].set_xlabel('Integrated voltage I') + axs[0].set_ylabel('Integrated voltage Q') + axs[0].set_title(f'IQ plot qubit {qubit}') + fig.suptitle(f'{timestamp}\n') + ########################## + # Plot projections + ########################## + # 01 projection + _bin_c = projection_01['bin_centers'] + bin_width = _bin_c[1]-_bin_c[0] + axs[1].bar(_bin_c, projection_01['h0'], bin_width, fc='C0', alpha=0.4) + axs[1].bar(_bin_c, projection_01['h1'], bin_width, fc='C3', alpha=0.4) + axs[1].plot(_bin_c, double_gauss(_bin_c, *projection_01['popt0']), '-C0') + axs[1].plot(_bin_c, double_gauss(_bin_c, *projection_01['popt1']), '-C3') + axs[1].axvline(projection_01['threshold'], ls='--', color='k', lw=1) + text = '\n'.join((f'Fid. : {projection_01["Fid"]*100:.1f}%', + f'SNR : {projection_01["SNR"]:.1f}')) + props = dict(boxstyle='round', facecolor='gray', alpha=0) + axs[1].text(.775, .9, text, transform=axs[1].transAxes, + verticalalignment='top', bbox=props, fontsize=7) + axs[1].text(projection_01['popt0'][0], projection_01['popt0'][4]/2, + r'$|g\rangle$', ha='center', va='center', color='C0') + axs[1].text(projection_01['popt1'][0], projection_01['popt1'][4]/2, + r'$|e\rangle$', ha='center', va='center', color='C3') + axs[1].set_xticklabels([]) + axs[1].set_xlim(_bin_c[0], _bin_c[-1]) + axs[1].set_ylim(bottom=0) + axs[1].set_title('Projection of data') + # 12 projection + _bin_c = projection_12['bin_centers'] + bin_width = _bin_c[1]-_bin_c[0] + axs[2].bar(_bin_c, projection_12['h1'], bin_width, fc='C3', alpha=0.4) + axs[2].bar(_bin_c, projection_12['h2'], bin_width, fc='C2', alpha=0.4) + axs[2].plot(_bin_c, double_gauss(_bin_c, *projection_12['popt1']), '-C3') + axs[2].plot(_bin_c, double_gauss(_bin_c, *projection_12['popt2']), '-C2') + axs[2].axvline(projection_12['threshold'], ls='--', color='k', lw=1) + text = '\n'.join((f'Fid. 
: {projection_12["Fid"]*100:.1f}%', + f'SNR : {projection_12["SNR"]:.1f}')) + props = dict(boxstyle='round', facecolor='gray', alpha=0) + axs[2].text(.775, .9, text, transform=axs[2].transAxes, + verticalalignment='top', bbox=props, fontsize=7) + axs[2].text(projection_12['popt1'][0], projection_12['popt1'][4]/2, + r'$|e\rangle$', ha='center', va='center', color='C3') + axs[2].text(projection_12['popt2'][0], projection_12['popt2'][4]/2, + r'$|f\rangle$', ha='center', va='center', color='C2') + axs[2].set_xticklabels([]) + axs[2].set_xlim(_bin_c[0], _bin_c[-1]) + axs[2].set_ylim(bottom=0) + # 02 projection + _bin_c = projection_02['bin_centers'] + bin_width = _bin_c[1]-_bin_c[0] + axs[3].bar(_bin_c, projection_02['h0'], bin_width, fc='C0', alpha=0.4) + axs[3].bar(_bin_c, projection_02['h2'], bin_width, fc='C2', alpha=0.4) + axs[3].plot(_bin_c, double_gauss(_bin_c, *projection_02['popt0']), '-C0') + axs[3].plot(_bin_c, double_gauss(_bin_c, *projection_02['popt2']), '-C2') + axs[3].axvline(projection_02['threshold'], ls='--', color='k', lw=1) + text = '\n'.join((f'Fid. 
: {projection_02["Fid"]*100:.1f}%', + f'SNR : {projection_02["SNR"]:.1f}')) + props = dict(boxstyle='round', facecolor='gray', alpha=0) + axs[3].text(.775, .9, text, transform=axs[3].transAxes, + verticalalignment='top', bbox=props, fontsize=7) + axs[3].text(projection_02['popt0'][0], projection_02['popt0'][4]/2, + r'$|g\rangle$', ha='center', va='center', color='C0') + axs[3].text(projection_02['popt2'][0], projection_02['popt2'][4]/2, + r'$|f\rangle$', ha='center', va='center', color='C2') + axs[3].set_xticklabels([]) + axs[3].set_xlim(_bin_c[0], _bin_c[-1]) + axs[3].set_ylim(bottom=0) + axs[3].set_xlabel('Integrated voltage') + # Write fidelity textbox + text = '\n'.join(('Assignment fidelity:', + f'$F_g$ : {Fid_dict["0"]*100:.1f}%', + f'$F_e$ : {Fid_dict["1"]*100:.1f}%', + f'$F_f$ : {Fid_dict["2"]*100:.1f}%', + f'$F_\mathrm{"{avg}"}$ : {Fid_dict["avg"]*100:.1f}%')) + props = dict(boxstyle='round', facecolor='gray', alpha=.2) + axs[1].text(1.05, 1, text, transform=axs[1].transAxes, + verticalalignment='top', bbox=props) + +def assignment_matrix_plotfn( + M, + qubit, + timestamp, + ax, **kw): + fig = ax.get_figure() + im = ax.imshow(M, cmap=plt.cm.Reds, vmin=0, vmax=1) + n = len(M) + for i in range(n): + for j in range(n): + c = M[j,i] + if abs(c) > .5: + ax.text(i, j, '{:.2f}'.format(c), va='center', ha='center', + color = 'white') + else: + ax.text(i, j, '{:.2f}'.format(c), va='center', ha='center') + ax.set_xticks(np.arange(n)) + ax.set_xticklabels([f'$|{i}\\rangle$' for i in range(n)]) + ax.set_xlabel('Assigned state') + ax.set_yticks(np.arange(n)) + ax.set_yticklabels([f'$|{i}\\rangle$' for i in range(n)]) + ax.set_ylabel('Prepared state') + name = qubit + if n==3: + name = 'Qutrit' + elif n==4: + name = 'Ququat' + ax.set_title(f'{timestamp}\n{name} assignment matrix qubit {qubit}') + cbar_ax = fig.add_axes([.95, .15, .03, .7]) + cb = fig.colorbar(im, cax=cbar_ax) + cb.set_label('assignment probability') + class 
Dispersive_shift_Analysis(ba.BaseDataAnalysis): ''' @@ -1449,382 +2554,2489 @@ def prepare_plots(self): } -class Multiplexed_Readout_Analysis_deprecated(ba.BaseDataAnalysis): +class Optimal_integration_weights_analysis(ba.BaseDataAnalysis): """ - For two qubits, to make an n-qubit mux readout experiment. - we should vectorize this analysis - - TODO: This needs to be rewritten/debugged! - Suggestion: - Use N*(N-1)/2 instances of Singleshot_Readout_Analysis, - run them without saving the plots and then merge together the - plot_dicts as in the cross_dephasing_analysis. + Mux transient analysis. """ + def __init__(self, + IF: float, + input_waveform: tuple = None, + t_start: str = None, t_stop: str = None, + label: str = '', + options_dict: dict = None, extract_only: bool = False, + auto=True): - def __init__(self, t_start: str=None, t_stop: str=None, - label: str='', - data_file_path: str=None, - options_dict: dict=None, extract_only: bool=False, - nr_of_qubits: int = 2, - qubit_names: list=None, - do_fitting: bool=True, auto=True): - """ - Inherits from BaseDataAnalysis. - Extra arguments of interest - qubit_names (list) : used to label the experiments, names of the - qubits. LSQ is last name in the list. If not specified will - set qubit_names to [qN, ..., q1, q0] + super().__init__(t_start=t_start, t_stop=t_stop, + label=label, + options_dict=options_dict, + extract_only=extract_only) + self.IF = IF + self.input_waveform = input_waveform + if auto: + self.run_analysis() + def extract_data(self): + """ + This is a new style (sept 2019) data extraction. + This could at some point move to a higher level class. 
""" - self.nr_of_qubits = nr_of_qubits - if qubit_names is None: - self.qubit_names = list(reversed(['q{}'.format(i) - for i in range(nr_of_qubits)])) + self.get_timestamps() + self.raw_data_dict = {} + for ts in self.timestamps: + data_fp = get_datafilepath_from_timestamp(ts) + param_spec = {'data': ('Experimental Data/Data', 'dset'), + 'value_names': ('Experimental Data', 'attr:value_names')} + self.raw_data_dict[ts] = h5d.extract_pars_from_datafile( + data_fp, param_spec) + # Parts added to be compatible with base analysis data requirements + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] + + def process_data(self): + ts_off = self.timestamps[0] + ts_on = self.timestamps[1] + Time = self.raw_data_dict[ts_off]['data'][:,0] + Trace_I_0 = self.raw_data_dict[ts_off]['data'][:,1] + Trace_Q_0 = self.raw_data_dict[ts_off]['data'][:,2] + Trace_I_1 = self.raw_data_dict[ts_on]['data'][:,1] + Trace_Q_1 = self.raw_data_dict[ts_on]['data'][:,2] + # Subtract offset + _trace_I_0 = Trace_I_0 - np.mean(Trace_I_0) + _trace_Q_0 = Trace_Q_0 - np.mean(Trace_Q_0) + _trace_I_1 = Trace_I_1 - np.mean(Trace_I_1) + _trace_Q_1 = Trace_Q_1 - np.mean(Trace_Q_1) + # Demodulate traces + def _demodulate(Time, I, Q, IF): + Complex_vec = I + 1j*Q + I_demod = np.real(np.exp(1j*2*np.pi*IF*Time)*Complex_vec) + Q_demod = np.imag(np.exp(1j*2*np.pi*IF*Time)*Complex_vec) + return I_demod, Q_demod + Trace_I_0_demod, Trace_Q_0_demod = _demodulate(Time, _trace_I_0, _trace_Q_0, self.IF) + Trace_I_1_demod, Trace_Q_1_demod = _demodulate(Time, _trace_I_1, _trace_Q_1, self.IF) + + # Calculate optimal weights + Weights_I = _trace_I_1 - _trace_I_0 + Weights_Q = _trace_Q_1 - _trace_Q_0 + # joint rescaling to +/-1 Volt + maxI = np.max(np.abs(Weights_I)) + maxQ = np.max(np.abs(Weights_Q)) + # Dividing the weight functions by four to not have overflow in + # thresholding of the UHFQC + weight_scale_factor = 1./(4*np.max([maxI, maxQ])) + Weights_I = 
np.array(weight_scale_factor*Weights_I) + Weights_Q = np.array(weight_scale_factor*Weights_Q) + + # Demodulate weights + Weights_I_demod, Weights_Q_demod = _demodulate(Time, Weights_I, Weights_Q, self.IF) + # Smooth weights + from scipy.signal import medfilt + Weights_I_demod_s = medfilt(Weights_I_demod, 31) + Weights_Q_demod_s = medfilt(Weights_Q_demod, 31) + Weights_I_s, Weights_Q_s = _demodulate(Time, Weights_I_demod_s, Weights_Q_demod_s, -self.IF) + + # PSD of output signal + time_step = Time[1] + ps_0 = np.abs(np.fft.fft(1j*_trace_I_0+_trace_Q_0))**2*time_step/len(Time) + ps_1 = np.abs(np.fft.fft(1j*_trace_I_1+_trace_Q_1))**2*time_step/len(Time) + Freqs = np.fft.fftfreq(_trace_I_0.size, time_step) + idx = np.argsort(Freqs) + Freqs = Freqs[idx] + ps_0 = ps_0[idx] + ps_1 = ps_1[idx] + # PSD of input signal + if type(self.input_waveform) != type(None): + _n_tt = len(Time) + _n_wf = len(self.input_waveform[0]) + in_wf_I = np.concatenate((self.input_waveform[0], + np.zeros(_n_tt-_n_wf))) + in_wf_Q = np.concatenate((self.input_waveform[1], + np.zeros(_n_tt-_n_wf))) + in_wf = 1j*in_wf_I + in_wf_Q + ps_wf = np.abs(np.fft.fft(in_wf))**2*time_step/len(in_wf) + Freqs_wf = np.fft.fftfreq(in_wf.size, time_step) + idx_wf = np.argsort(Freqs_wf) + Freqs_wf = Freqs_wf[idx_wf] + ps_wf = ps_wf[idx_wf] + # normalize (for plotting purposes) + ps_wf = ps_wf/np.max(ps_wf)*max([np.max(ps_0),np.max(ps_1)])*1.1 + self.proc_data_dict['Freqs_wf'] = Freqs_wf + self.proc_data_dict['ps_wf'] = ps_wf + + self.proc_data_dict['Time'] = Time + self.proc_data_dict['Trace_I_0'] = Trace_I_0 + self.proc_data_dict['Trace_Q_0'] = Trace_Q_0 + self.proc_data_dict['Trace_I_1'] = Trace_I_1 + self.proc_data_dict['Trace_Q_1'] = Trace_Q_1 + self.proc_data_dict['Trace_I_0_demod'] = Trace_I_0_demod + self.proc_data_dict['Trace_Q_0_demod'] = Trace_Q_0_demod + self.proc_data_dict['Trace_I_1_demod'] = Trace_I_1_demod + self.proc_data_dict['Trace_Q_1_demod'] = Trace_Q_1_demod + 
self.proc_data_dict['Weights_I_demod'] = Weights_I_demod + self.proc_data_dict['Weights_Q_demod'] = Weights_Q_demod + self.proc_data_dict['Weights_I_demod_s'] = Weights_I_demod_s + self.proc_data_dict['Weights_Q_demod_s'] = Weights_Q_demod_s + self.proc_data_dict['Weights_I_s'] = Weights_I_s + self.proc_data_dict['Weights_Q_s'] = Weights_Q_s + self.proc_data_dict['Freqs'] = Freqs + self.proc_data_dict['ps_0'] = ps_0 + self.proc_data_dict['ps_1'] = ps_1 + + self.qoi = {} + self.qoi['Weights_I_s'] = Weights_I_s + self.qoi['Weights_Q_s'] = Weights_Q_s + + # If second state + if len(self.timestamps) == 3: + self.f_state = True else: - self.qubit_names = qubit_names + self.f_state = False + if self.f_state: + ts_two = self.timestamps[2] + Trace_I_2 = self.raw_data_dict[ts_two]['data'][:,1] + Trace_Q_2 = self.raw_data_dict[ts_two]['data'][:,2] + # Subtract offset + _trace_I_2 = Trace_I_2 - np.mean(Trace_I_2) + _trace_Q_2 = Trace_Q_2 - np.mean(Trace_Q_2) + # Demodulate traces + Trace_I_2_demod, Trace_Q_2_demod = _demodulate(Time, _trace_I_2, + _trace_Q_2, self.IF) + # Calculate optimal weights + Weights_I_ef = _trace_I_2 - _trace_I_1 + Weights_Q_ef = _trace_Q_2 - _trace_Q_1 + # joint rescaling to +/-1 Volt + maxI = np.max(np.abs(Weights_I_ef)) + maxQ = np.max(np.abs(Weights_Q_ef)) + # Dividing the weight functions by four to not have overflow in + # thresholding of the UHFQC + weight_scale_factor = 1./(4*np.max([maxI, maxQ])) + Weights_I_ef = np.array(weight_scale_factor*Weights_I_ef) + Weights_Q_ef = np.array(weight_scale_factor*Weights_Q_ef) + # Demodulate weights + Weights_I_ef_demod, Weights_Q_ef_demod = _demodulate(Time, Weights_I_ef, + Weights_Q_ef, self.IF) + # Smooth weights + from scipy.signal import medfilt + Weights_I_ef_demod_s = medfilt(Weights_I_ef_demod, 31) + Weights_Q_ef_demod_s = medfilt(Weights_Q_ef_demod, 31) + Weights_I_ef_s, Weights_Q_ef_s = _demodulate(Time, Weights_I_ef_demod_s, + Weights_Q_ef_demod_s, -self.IF) + # Save quantities + 
self.proc_data_dict['Trace_I_2'] = Trace_I_2 + self.proc_data_dict['Trace_Q_2'] = Trace_Q_2 + self.proc_data_dict['Trace_I_2_demod'] = Trace_I_2_demod + self.proc_data_dict['Trace_Q_2_demod'] = Trace_Q_2_demod + self.proc_data_dict['Weights_I_ef_demod'] = Weights_I_ef_demod + self.proc_data_dict['Weights_Q_ef_demod'] = Weights_Q_ef_demod + self.proc_data_dict['Weights_I_ef_demod_s'] = Weights_I_ef_demod_s + self.proc_data_dict['Weights_Q_ef_demod_s'] = Weights_Q_ef_demod_s + self.proc_data_dict['Weights_I_ef_s'] = Weights_I_ef_s + self.proc_data_dict['Weights_Q_ef_s'] = Weights_Q_ef_s + self.qoi['Weights_I_ef_s'] = Weights_I_ef_s + self.qoi['Weights_Q_ef_s'] = Weights_Q_ef_s + + def prepare_plots(self): + + self.axs_dict = {} + n = len(self.timestamps) + fig, axs = plt.subplots(figsize=(9.75/2*n, 5.2), nrows=2, ncols=n, sharex=True, sharey='row', dpi=100) + axs = axs.flatten() + # fig.patch.set_alpha(0) + self.axs_dict['Transients_plot'] = axs[0] + self.figs['Transients_plot'] = fig + self.plot_dicts['Transients_plot'] = { + 'plotfn': Transients_plotfn, + 'ax_id': 'Transients_plot', + 'Time': self.proc_data_dict['Time'], + 'Trace_I_0': self.proc_data_dict['Trace_I_0'], + 'Trace_Q_0': self.proc_data_dict['Trace_Q_0'], + 'Trace_I_1': self.proc_data_dict['Trace_I_1'], + 'Trace_Q_1': self.proc_data_dict['Trace_Q_1'], + 'Trace_I_2': self.proc_data_dict['Trace_I_2'] if self.f_state else None, + 'Trace_Q_2': self.proc_data_dict['Trace_Q_2'] if self.f_state else None, + 'Trace_I_0_demod': self.proc_data_dict['Trace_I_0_demod'], + 'Trace_Q_0_demod': self.proc_data_dict['Trace_Q_0_demod'], + 'Trace_I_1_demod': self.proc_data_dict['Trace_I_1_demod'], + 'Trace_Q_1_demod': self.proc_data_dict['Trace_Q_1_demod'], + 'Trace_I_2_demod': self.proc_data_dict['Trace_I_2_demod'] if self.f_state else None, + 'Trace_Q_2_demod': self.proc_data_dict['Trace_Q_2_demod'] if self.f_state else None, + 'timestamp': self.timestamps[1] + } + + fig, ax = plt.subplots(figsize=(5, 5), dpi=100) + # 
fig.patch.set_alpha(0) + self.axs_dict['IQ_trajectory_plot'] = ax + self.figs['IQ_trajectory_plot'] = fig + self.plot_dicts['IQ_trajectory_plot'] = { + 'plotfn': IQ_plotfn, + 'ax_id': 'IQ_trajectory_plot', + 'Trace_I_0_demod': self.proc_data_dict['Trace_I_0_demod'], + 'Trace_Q_0_demod': self.proc_data_dict['Trace_Q_0_demod'], + 'Trace_I_1_demod': self.proc_data_dict['Trace_I_1_demod'], + 'Trace_Q_1_demod': self.proc_data_dict['Trace_Q_1_demod'], + 'Trace_I_2_demod': self.proc_data_dict['Trace_I_2_demod'] if self.f_state else None, + 'Trace_Q_2_demod': self.proc_data_dict['Trace_Q_2_demod'] if self.f_state else None, + 'timestamp': self.timestamps[1] + } + + fig, axs = plt.subplots(figsize=(9*1.4, 3*1.4), ncols=2, + gridspec_kw={'width_ratios': [5*1.4, 3*1.4]}, dpi=100) + axs = axs.flatten() + # fig.patch.set_alpha(0) + self.axs_dict['Optimal_weights_plot'] = axs[0] + self.figs['Optimal_weights_plot'] = fig + self.plot_dicts['Optimal_weights_plot'] = { + 'plotfn': Weights_plotfn, + 'ax_id': 'Optimal_weights_plot', + 'Time': self.proc_data_dict['Time'], + 'Weights_I_demod': self.proc_data_dict['Weights_I_demod'], + 'Weights_Q_demod': self.proc_data_dict['Weights_Q_demod'], + 'Weights_I_demod_s': self.proc_data_dict['Weights_I_demod_s'], + 'Weights_Q_demod_s': self.proc_data_dict['Weights_Q_demod_s'], + 'Weights_I_ef_demod': self.proc_data_dict['Weights_I_ef_demod'] if self.f_state else None, + 'Weights_Q_ef_demod': self.proc_data_dict['Weights_Q_ef_demod'] if self.f_state else None, + 'Weights_I_ef_demod_s': self.proc_data_dict['Weights_I_ef_demod_s'] if self.f_state else None, + 'Weights_Q_ef_demod_s': self.proc_data_dict['Weights_Q_ef_demod_s'] if self.f_state else None, + 'timestamp': self.timestamps[1] + } + + fig, axs = plt.subplots(figsize=(8,3), ncols=2, dpi=100, + sharey=True) + axs = axs.flatten() + # fig.patch.set_alpha(0) + self.axs_dict['FFT_plot'] = axs[0] + self.figs['FFT_plot'] = fig + self.plot_dicts['FFT_plot'] = { + 'plotfn': FFT_plotfn, + 'ax_id': 
'FFT_plot', + 'Freqs': self.proc_data_dict['Freqs'], + 'ps_0': self.proc_data_dict['ps_0'], + 'ps_1': self.proc_data_dict['ps_1'], + 'Freqs_wf': self.proc_data_dict['Freqs_wf'] if type(self.input_waveform)!=type(None) else None, + 'ps_wf': self.proc_data_dict['ps_wf'] if type(self.input_waveform)!=type(None) else None, + 'IF': self.IF, + 'timestamp': self.timestamps[1] + } + + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', False): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + tag_tstamp=self.options_dict.get('tag_tstamp', True)) + +def Transients_plotfn( + Time, + Trace_I_0, Trace_Q_0, + Trace_I_1, Trace_Q_1, + Trace_I_2, Trace_Q_2, + Trace_I_0_demod, Trace_Q_0_demod, + Trace_I_1_demod, Trace_Q_1_demod, + Trace_I_2_demod, Trace_Q_2_demod, + timestamp, + ax, **kw): + fig = ax.get_figure() + axs = fig.get_axes() + if type(Trace_I_2) != type(None): + n = 3 + else: + n = 2 + axs[0].plot(Time*1e6, Trace_I_0, color='#82B1FF', ls='-', lw=1, label='In phase component') + axs[n].plot(Time*1e6, Trace_Q_0, color='#82B1FF', ls='-', lw=1, label='Quadrature component') + axs[1].plot(Time*1e6, Trace_I_1, color='#E57373', ls='-', lw=1, label='In phase component') + axs[n+1].plot(Time*1e6, Trace_Q_1, color='#E57373', ls='-', lw=1, label='Quadrature component') + axs[0].plot(Time*1e6, Trace_I_0_demod, color='#0D47A1', ls='-', lw=1) + axs[n].plot(Time*1e6, Trace_Q_0_demod, color='#0D47A1', ls='-', lw=1) + axs[1].plot(Time*1e6, Trace_I_1_demod, color='#C62828', ls='-', lw=1) + axs[n+1].plot(Time*1e6, Trace_Q_1_demod, color='#C62828', ls='-', lw=1) + if n == 3: + axs[2].plot(Time*1e6, Trace_I_2, color='#A5D6A7', ls='-', lw=1, label='In phase component') + axs[n+2].plot(Time*1e6, Trace_Q_2, color='#A5D6A7', ls='-', lw=1, label='Quadrature component') + axs[2].plot(Time*1e6, Trace_I_2_demod, color='#2E7D32', ls='-', 
lw=1) + axs[n+2].plot(Time*1e6, Trace_Q_2_demod, color='#2E7D32', ls='-', lw=1) + axs[n+2].set_xlabel('Time ($\mathrm{\mu s}$)') + axs[2].set_title(r'$2^\mathrm{nd}$ excited state') + axs[2].legend(frameon=False, fontsize=9) + axs[n+2].legend(frameon=False, fontsize=9) + + axs[n].set_xlabel('Time ($\mathrm{\mu s}$)') + axs[n+1].set_xlabel('Time ($\mathrm{\mu s}$)') + axs[0].set_ylabel('Voltage (V)') + axs[n].set_ylabel('Voltage (V)') + axs[0].set_title('Ground state') + axs[1].set_title('Excited state') + axs[0].legend(frameon=False, fontsize=9) + axs[1].legend(frameon=False, fontsize=9) + axs[n].legend(frameon=False, fontsize=9) + axs[n+1].legend(frameon=False, fontsize=9) + fig.suptitle(f'{timestamp}\nReadout transients', y=.95) + fig.tight_layout() + +def IQ_plotfn( + Trace_I_0_demod, Trace_Q_0_demod, + Trace_I_1_demod, Trace_Q_1_demod, + Trace_I_2_demod, Trace_Q_2_demod, + timestamp, + ax, **kw): + fig = ax.get_figure() + axs = fig.get_axes() + + ax.plot(Trace_I_0_demod*1e3, Trace_Q_0_demod*1e3, color='#0D47A1', ls='-', lw=.5, label='ground') + ax.plot(Trace_I_1_demod*1e3, Trace_Q_1_demod*1e3, color='#C62828', ls='-', lw=.5, label='excited') + _lim = np.max(np.concatenate((np.abs(Trace_I_0_demod*1e3), np.abs(Trace_Q_0_demod*1e3), + np.abs(Trace_I_1_demod*1e3), np.abs(Trace_Q_1_demod*1e3)))) + if type(Trace_I_2_demod) != type(None): + ax.plot(Trace_I_2_demod*1e3, Trace_Q_2_demod*1e3, color='C2', ls='-', lw=.5, label='$2^{nd}$ excited') + _lim = np.max(np.concatenate((np.abs(Trace_I_0_demod*1e3), np.abs(Trace_Q_0_demod*1e3), + np.abs(Trace_I_1_demod*1e3), np.abs(Trace_Q_1_demod*1e3), + np.abs(Trace_I_2_demod*1e3), np.abs(Trace_Q_2_demod*1e3)))) + ax.set_xlim(-_lim*1.2, _lim*1.2) + ax.set_ylim(-_lim*1.2, _lim*1.2) + ax.set_xlabel('I Voltage (mV)') + ax.set_ylabel('Q Voltage (mV)') + ax.set_title(f'{timestamp}\nIQ trajectory') + ax.legend(frameon=False, bbox_to_anchor=(1.01, 1)) + +def Weights_plotfn( + Time, + Weights_I_demod, Weights_Q_demod, + Weights_I_demod_s, 
Weights_Q_demod_s, + Weights_I_ef_demod, Weights_Q_ef_demod, + Weights_I_ef_demod_s, Weights_Q_ef_demod_s, + timestamp, + ax, **kw): + fig = ax.get_figure() + axs = fig.get_axes() + + axs[0].plot(Time*1e6, Weights_I_demod, color='C0', ls='-', lw=1, alpha=.25) + axs[0].plot(Time*1e6, Weights_Q_demod, color='#6A1B9A', ls='-', lw=1, alpha=.25) + axs[0].plot(Time*1e6, Weights_I_demod_s, color='C0', ls='-', lw=2, alpha=1, label='Weight function I') + axs[0].plot(Time*1e6, Weights_Q_demod_s, color='#6A1B9A', ls='-', lw=2, alpha=1, label='Weight function Q') + axs[1].plot(Weights_I_demod, Weights_Q_demod, color='C0', ls='-', lw=.5, alpha=.5) + axs[1].plot(Weights_I_demod_s, Weights_Q_demod_s, color='C0', ls='-', lw=2, alpha=1, label='$ge$ weights') + _lim = np.max(np.concatenate((np.abs(Weights_I_demod), np.abs(Weights_Q_demod)))) + if type(Weights_I_ef_demod) != type(None): + axs[0].plot(Time*1e6, Weights_I_ef_demod, color='#008b00', ls='-', lw=1, alpha=.25) + axs[0].plot(Time*1e6, Weights_Q_ef_demod, color='#B71C1C', ls='-', lw=1, alpha=.25) + axs[0].plot(Time*1e6, Weights_I_ef_demod_s, color='#008b00', ls='-', lw=2, alpha=1, label='Weight function I ef') + axs[0].plot(Time*1e6, Weights_Q_ef_demod_s, color='#B71C1C', ls='-', lw=2, alpha=1, label='Weight function Q ef') + axs[1].plot(Weights_I_ef_demod, Weights_Q_ef_demod, color='C2', ls='-', lw=.5, alpha=.5) + axs[1].plot(Weights_I_ef_demod_s, Weights_Q_ef_demod_s, color='C2', ls='-', lw=2, alpha=1, label='$ef$ weights') + _lim = np.max(np.concatenate((np.abs(Weights_I_demod), np.abs(Weights_Q_demod), + np.abs(Weights_I_ef_demod), np.abs(Weights_Q_ef_demod)))) + axs[0].set_xlabel('Time ($\mathrm{\mu s}$)') + axs[0].set_ylabel('Amplitude (a.u.)') + axs[0].legend(frameon=False, fontsize=7) + axs[1].set_xlim(-_lim*1.1, _lim*1.1) + axs[1].set_ylim(-_lim*1.1, _lim*1.1) + axs[1].set_xticklabels([]) + axs[1].set_yticklabels([]) + axs[1].set_xlabel('I component (a.u.)') + axs[1].set_ylabel('Q component (a.u.)') + 
axs[0].set_title('Optimal integration weights') + axs[1].set_title('IQ trajectory') + axs[1].legend(frameon=False) + fig.suptitle(f'{timestamp}') + +def FFT_plotfn( + Freqs, + IF, + ps_0, + ps_1, + timestamp, + ax, + Freqs_wf = None, + ps_wf = None, + **kw): + fig = ax.get_figure() + axs = fig.get_axes() + # Remove first and last points of + # array to remove nyquist frequency + Freqs = Freqs[1:-1] + ps_0 = ps_0[1:-1] + ps_1 = ps_1[1:-1] + axs[0].plot(Freqs*1e-6, ps_0, 'C0') + axs[0].plot(Freqs*1e-6, ps_1, 'C3') + if type(Freqs_wf) != None: + Freqs_wf = Freqs_wf[1:-1] + ps_wf = ps_wf[1:-1] + axs[0].plot(Freqs_wf*1e-6, ps_wf, '--', color='#607D8B', alpha=.5) + axs[0].axvline(IF*1e-6, color='k', ls='--', lw=1, label=f'IF : {IF*1e-6:.1f} MHz') + axs[0].set_xlim(left=np.min(Freqs*1e-6), right=np.max(Freqs*1e-6)) + axs[0].set_xlabel('Frequency (MHz)') + axs[0].set_ylabel('PSD ($\mathrm{V^2/Hz}$)') + + axs[1].plot(Freqs*1e-6, ps_0, 'C0', label='ground') + axs[1].plot(Freqs*1e-6, ps_1, 'C3', label='excited') + if type(Freqs_wf) != None: + axs[1].plot(Freqs_wf*1e-6, ps_wf, 'C2--', + label='input pulse', alpha=.5) + axs[1].axvline(IF*1e-6, color='k', ls='--', lw=1) + axs[1].set_xlim(left=IF*1e-6-50, right=IF*1e-6+50) + axs[1].set_xlabel('Frequency (MHz)') + axs[0].legend(frameon=False) + axs[1].legend(frameon=False, fontsize=7, bbox_to_anchor=(1,1)) + fig.suptitle(f'{timestamp}\nTransients PSD', y=1.025) + + +class measurement_QND_analysis(ba.BaseDataAnalysis): + """ + This analysis extracts measurement QND metrics + For details on the procedure see: + arXiv:2110.04285 + """ + def __init__(self, + qubit:str, + f_state: bool = False, + t_start: str = None, + t_stop: str = None, + label: str = '', + options_dict: dict = None, + extract_only: bool = False, + auto=True + ): super().__init__(t_start=t_start, t_stop=t_stop, label=label, - data_file_path=data_file_path, options_dict=options_dict, - extract_only=extract_only, do_fitting=do_fitting) - self.single_timestamp = False - 
self.params_dict = { - 'measurementstring': 'measurementstring', - 'measured_values': 'measured_values', - 'value_names': 'value_names', - 'value_units': 'value_units'} + extract_only=extract_only) + + self.qubit = qubit + self.f_state = f_state - self.numeric_params = [] if auto: self.run_analysis() - def process_data(self): + def extract_data(self): """ - Responsible for creating the histograms based on the raw data + This is a new style (sept 2019) data extraction. + This could at some point move to a higher level class. """ - # Determine the shape of the data to extract wheter to rotate or not - nr_bins = int(self.options_dict.get('nr_bins', 100)) + self.get_timestamps() + self.timestamp = self.timestamps[0] + data_fp = get_datafilepath_from_timestamp(self.timestamp) + param_spec = {'data': ('Experimental Data/Data', 'dset'), + 'value_names': ('Experimental Data', 'attr:value_names')} + self.raw_data_dict = h5d.extract_pars_from_datafile( + data_fp, param_spec) + # Parts added to be compatible with base analysis data requirements + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] - # self.proc_data_dict['shots_0'] = [''] * nr_expts - # self.proc_data_dict['shots_1'] = [''] * nr_expts - - ################################################################# - # Separating data into shots for the different prepared states # - ################################################################# - self.proc_data_dict['nr_of_qubits'] = self.nr_of_qubits - self.proc_data_dict['qubit_names'] = self.qubit_names + def process_data(self): + if self.f_state: + _cycle = 6 + else: + _cycle = 5 + # Calibration shots + I0, Q0 = self.raw_data_dict['data'][:,1][3::_cycle], self.raw_data_dict['data'][:,2][3::_cycle] + I1, Q1 = self.raw_data_dict['data'][:,1][4::_cycle], self.raw_data_dict['data'][:,2][4::_cycle] + if self.f_state: + I2, Q2 = self.raw_data_dict['data'][:,1][5::_cycle], 
self.raw_data_dict['data'][:,2][5::_cycle] + center_2 = np.array([np.mean(I2), np.mean(Q2)]) + # Measurement + IM1, QM1 = self.raw_data_dict['data'][0::_cycle,1], self.raw_data_dict['data'][0::_cycle,2] + IM2, QM2 = self.raw_data_dict['data'][1::_cycle,1], self.raw_data_dict['data'][1::_cycle,2] + IM3, QM3 = self.raw_data_dict['data'][2::_cycle,1], self.raw_data_dict['data'][2::_cycle,2] + # Rotate data + center_0 = np.array([np.mean(I0), np.mean(Q0)]) + center_1 = np.array([np.mean(I1), np.mean(Q1)]) + def rotate_and_center_data(I, Q, vec0, vec1): + vector = vec1-vec0 + angle = np.arctan(vector[1]/vector[0]) + rot_matrix = np.array([[ np.cos(-angle),-np.sin(-angle)], + [ np.sin(-angle), np.cos(-angle)]]) + # Subtract mean + proc = np.array((I-(vec0+vec1)[0]/2, Q-(vec0+vec1)[1]/2)) + # Rotate theta + proc = np.dot(rot_matrix, proc) + return proc + I0_proc, Q0_proc = rotate_and_center_data(I0, Q0, center_0, center_1) + I1_proc, Q1_proc = rotate_and_center_data(I1, Q1, center_0, center_1) + IM1_proc, QM1_proc = rotate_and_center_data(IM1, QM1, center_0, center_1) + IM2_proc, QM2_proc = rotate_and_center_data(IM2, QM2, center_0, center_1) + IM3_proc, QM3_proc = rotate_and_center_data(IM3, QM3, center_0, center_1) + if np.mean(I0_proc) > np.mean(I1_proc): + I0_proc *= -1 + I1_proc *= -1 + IM1_proc *= -1 + IM2_proc *= -1 + IM3_proc *= -1 + # Calculate optimal threshold + ubins_A_0, ucounts_A_0 = np.unique(I0_proc, return_counts=True) + ubins_A_1, ucounts_A_1 = np.unique(I1_proc, return_counts=True) + ucumsum_A_0 = np.cumsum(ucounts_A_0) + ucumsum_A_1 = np.cumsum(ucounts_A_1) + # merge |0> and |1> shot bins + all_bins_A = np.unique(np.sort(np.concatenate((ubins_A_0, ubins_A_1)))) + # interpolate cumsum for all bins + int_cumsum_A_0 = np.interp(x=all_bins_A, xp=ubins_A_0, fp=ucumsum_A_0, left=0) + int_cumsum_A_1 = np.interp(x=all_bins_A, xp=ubins_A_1, fp=ucumsum_A_1, left=0) + norm_cumsum_A_0 = int_cumsum_A_0/np.max(int_cumsum_A_0) + norm_cumsum_A_1 = 
int_cumsum_A_1/np.max(int_cumsum_A_1) + # Calculating threshold + F_vs_th = (1-(1-abs(norm_cumsum_A_0-norm_cumsum_A_1))/2) + opt_idxs = np.argwhere(F_vs_th == np.amax(F_vs_th)) + opt_idx = int(round(np.average(opt_idxs))) + threshold = all_bins_A[opt_idx] + # digitize data + P0_dig = np.array([ 0 if s float: + meas_probs = variables.reshape(num_states, num_states, num_states) + probs = np.einsum("ijk, klm -> ijl", meas_probs, meas_probs) + return np.linalg.norm(np.ravel(probs - obs_probs)) + + cons_mat = np.zeros((num_constraints, num_vars), dtype=int) + num_cons_vars = int(num_vars / num_constraints) + for init_state in range(NUM_STATES): + var_ind = init_state * num_cons_vars + cons_mat[init_state, var_ind : var_ind + num_cons_vars] = 1 + + constraints = {"type": "eq", "fun": lambda variables: cons_mat @ variables - 1} + bounds = opt.Bounds(0, 1) + + ideal_probs = np.zeros((NUM_STATES, NUM_OUTCOMES, NUM_OUTCOMES), dtype=float) + for state in range(NUM_STATES): + ideal_probs[state, state, state] = 1 + init_vec = np.ravel(ideal_probs) + + result = opt.basinhopping( + opt_func, + init_vec, + niter=500, + minimizer_kwargs=dict( + args=(joint_probs.data, NUM_STATES), + bounds=bounds, + constraints=constraints, + method="SLSQP", + tol=1e-12, + options=dict( + maxiter=10000, + ) + ) + ) + # if not result.success: + # raise ValueError("Unsuccessful optimization, please check parameters and tolerance.") + res_data = result.x.reshape((NUM_STATES, NUM_OUTCOMES, NUM_STATES)) + + meas_probs = xr.DataArray( + res_data, + dims = ["input_state", "outcome", "output_state"], + coords = dict( + input_state = STATES, + outcome = OUTCOMES, + output_state = STATES, + ) + ) + + pred_joint_probs = np.einsum("ijk, klm -> ijl", meas_probs, meas_probs) + + true_vals = np.ravel(joint_probs) + pred_vals = np.ravel(pred_joint_probs) + + ms_error = mean_squared_error(true_vals, pred_vals) + rms_error = np.sqrt(ms_error) + ma_error = mean_absolute_error(true_vals, pred_vals) + + print(f"RMS 
error of the optimised solution: {rms_error}") + print(f"MA error of the optimised solution: {ma_error}") + + num_vars = 3 * (NUM_STATES ** 2) + num_constraints = 3 * NUM_STATES + + def opt_func( + variables, + obs_probs, + num_states: int, + ) -> float: + pre_mat, ro_mat, post_mat = variables.reshape(3, num_states, num_states) + probs = np.einsum("ih, hm, ho -> imo", pre_mat, ro_mat, post_mat) + return np.linalg.norm(probs - obs_probs) + + cons_mat = np.zeros((num_constraints, num_vars), dtype=int) + for op_ind in range(3): + for init_state in range(NUM_STATES): + cons_ind = op_ind*NUM_STATES + init_state + var_ind = (op_ind*NUM_STATES + init_state)*NUM_STATES + cons_mat[cons_ind, var_ind : var_ind + NUM_STATES] = 1 + + ideal_probs = np.tile(np.eye(NUM_STATES), (3, 1)) + init_vec = np.ravel(ideal_probs) + + constraints = {"type": "eq", "fun": lambda variables: cons_mat @ variables - 1} + bounds = opt.Bounds(0, 1, keep_feasible=True) + + result = opt.basinhopping( + opt_func, + init_vec, + minimizer_kwargs = dict( + args = (meas_probs.data, NUM_STATES), + bounds = bounds, + constraints = constraints, + method = "SLSQP", + tol = 1e-12, + options = dict( + maxiter = 10000, + ) + ), + niter=500 + ) + + + # if not result.success: + # raise ValueError("Unsuccessful optimization, please check parameters and tolerance.") + + pre_trans, ass_errors, post_trans = result.x.reshape((3, NUM_STATES, NUM_STATES)) + + pred_meas_probs = np.einsum("ih, hm, ho -> imo", pre_trans, ass_errors, post_trans) + + true_vals = np.ravel(meas_probs) + pred_vals = np.ravel(pred_meas_probs) + + ms_error = mean_squared_error(true_vals, pred_vals) + rms_error = np.sqrt(ms_error) + ma_error = mean_absolute_error(true_vals, pred_vals) + + print(f"RMS error of the optimised solution: {rms_error}") + print(f"MA error of the optimised solution: {ma_error}") + + QND_state = {} + for state in STATES: + state_qnd = np.sum(meas_probs.data[state,:, state]) + QND_state[f'{state}'] = state_qnd + + meas_qnd = 
np.mean(np.diag(meas_probs.sum(axis=1))) + meas_qnd + + fit_res = {} + fit_res['butter_prob'] = pred_meas_probs + fit_res['mean_QND'] = meas_qnd + fit_res['state_qnd'] = QND_state + fit_res['ass_errors'] = ass_errors + fit_res['qutrit_fidelity'] = accuracy*100 + fit_res['fidelity'] = fid + fit_res['timestamp'] = timestamp + + # Meas leak rate + L1 = 100*np.sum(fit_res['butter_prob'][:2,:,2])/2 + + # Meas seepage rate + s = 100*np.sum(fit_res['butter_prob'][2,:,:2]) + + fit_res['L1'] = L1 + fit_res['seepage'] = s + + return fit_res - combinations = [int2base( - i, base=base, fixed_length=self.nr_of_qubits) for i in - range(number_of_experiments)] - self.proc_data_dict['combinations'] = combinations +class measurement_butterfly_analysis(ba.BaseDataAnalysis): + """ + This analysis extracts measurement butter fly + """ + def __init__(self, + qubit:str, + t_start: str = None, + t_stop: str = None, + label: str = '', + f_state: bool = False, + cycle : int = 6, + options_dict: dict = None, + extract_only: bool = False, + auto=True + ): - for i, comb in enumerate(combinations): - # No post selection implemented yet - self.proc_data_dict['{} {}'.format(ch_name, comb)] = \ - self.proc_data_dict[ch_name][i::number_of_experiments] - ##################################### - # Binning data into 1D histograms # - ##################################### - hist_name = 'hist {} {}'.format( - ch_name, comb) - self.proc_data_dict[hist_name] = np.histogram( - self.proc_data_dict['{} {}'.format( - ch_name, comb)], - bins=nr_bins, range=(min_sh, max_sh)) - # Cumulative histograms # - chist_name = 'c'+hist_name - # the cumulative histograms are normalized to ensure the right - # fidelities can be calculated - self.proc_data_dict[chist_name] = np.cumsum( - self.proc_data_dict[hist_name][0])/( - np.sum(self.proc_data_dict[hist_name][0])) - - self.proc_data_dict['bin_centers {}'.format(ch_name)] = ( - self.proc_data_dict[hist_name][1][:-1] + - self.proc_data_dict[hist_name][1][1:]) / 2 - - 
self.proc_data_dict['binsize {}'.format(ch_name)] = ( - self.proc_data_dict[hist_name][1][1] - - self.proc_data_dict[hist_name][1][0]) - - ##################################################################### - # Combining histograms of all different combinations and calc Fid. - ###################################################################### - for ch_idx, ch_name in enumerate(self.proc_data_dict['ch_names']): - # Create labels for the specific combinations - comb_str_0, comb_str_1, comb_str_2 = get_arb_comb_xx_label( - self.proc_data_dict['nr_of_qubits'], qubit_idx=ch_idx) - - # Initialize the arrays - self.proc_data_dict['hist {} {}'.format(ch_name, comb_str_0)] = \ - [np.zeros(nr_bins), np.zeros(nr_bins+1)] - self.proc_data_dict['hist {} {}'.format(ch_name, comb_str_1)] = \ - [np.zeros(nr_bins), np.zeros(nr_bins+1)] - zero_hist = self.proc_data_dict['hist {} {}'.format( - ch_name, comb_str_0)] - one_hist = self.proc_data_dict['hist {} {}'.format( - ch_name, comb_str_1)] - - # Fill them with data from the relevant combinations - for i, comb in enumerate(self.proc_data_dict['combinations']): - if comb[-(ch_idx+1)] == '0': - zero_hist[0] += self.proc_data_dict[ - 'hist {} {}'.format(ch_name, comb)][0] - zero_hist[1] = self.proc_data_dict[ - 'hist {} {}'.format(ch_name, comb)][1] - elif comb[-(ch_idx+1)] == '1': - one_hist[0] += self.proc_data_dict[ - 'hist {} {}'.format(ch_name, comb)][0] - one_hist[1] = self.proc_data_dict[ - 'hist {} {}'.format(ch_name, comb)][1] - elif comb[-(ch_idx+1)] == '2': - # Fixme add two state binning - raise NotImplementedError() - - chist_0 = np.cumsum(zero_hist[0])/(np.sum(zero_hist[0])) - chist_1 = np.cumsum(one_hist[0])/(np.sum(one_hist[0])) - - self.proc_data_dict['chist {} {}'.format(ch_name, comb_str_0)] \ - = chist_0 - self.proc_data_dict['chist {} {}'.format(ch_name, comb_str_1)] \ - = chist_1 - ########################################################### - # Threshold and fidelity based on cumulative histograms # - - 
qubit_name = self.proc_data_dict['qubit_names'][-(ch_idx+1)] - centers = self.proc_data_dict['bin_centers {}'.format(ch_name)] - fid, th = get_assignement_fid_from_cumhist(chist_0, chist_1, - centers) - self.proc_data_dict['F_ass_raw {}'.format(qubit_name)] = fid - self.proc_data_dict['threshold_raw {}'.format(qubit_name)] = th + super().__init__(t_start=t_start, t_stop=t_stop, + label=label, + options_dict=options_dict, + extract_only=extract_only) - def prepare_plots(self): - # N.B. If the log option is used we should manually set the - # yscale to go from .5 to the current max as otherwise the fits - # mess up the log plots. - # log_hist = self.options_dict.get('log_hist', False) - - for ch_idx, ch_name in enumerate(self.proc_data_dict['ch_names']): - q_name = self.proc_data_dict['qubit_names'][-(ch_idx+1)] - th_raw = self.proc_data_dict['threshold_raw {}'.format(q_name)] - F_raw = self.proc_data_dict['F_ass_raw {}'.format(q_name)] - - self.plot_dicts['histogram_{}'.format(ch_name)] = { - 'plotfn': make_mux_ssro_histogram, - 'data_dict': self.proc_data_dict, - 'ch_name': ch_name, - 'title': (self.timestamps[0] + ' \n' + - 'SSRO histograms {}'.format(ch_name))} - - thresholds = [th_raw] - threshold_labels = ['thresh. raw'] - - self.plot_dicts['comb_histogram_{}'.format(q_name)] = { - 'plotfn': make_mux_ssro_histogram_combined, - 'data_dict': self.proc_data_dict, - 'ch_name': ch_name, - 'thresholds': thresholds, - 'threshold_labels': threshold_labels, - 'qubit_idx': ch_idx, - 'title': (self.timestamps[0] + ' \n' + - 'Combined SSRO histograms {}'.format(q_name))} - - fid_threshold_msg = 'Summary {}\n'.format(q_name) - fid_threshold_msg += r'$F_{A}$-raw: ' + '{:.3f} \n'.format(F_raw) - fid_threshold_msg += r'thresh. 
raw: ' + '{:.3f} \n'.format(th_raw) - - self.plot_dicts['fid_threshold_msg_{}'.format(q_name)] = { - 'plotfn': self.plot_text, - 'xpos': 1.05, - 'ypos': .9, - 'horizontalalignment': 'left', - 'text_string': fid_threshold_msg, - 'ax_id': 'comb_histogram_{}'.format(q_name)} + self.qubit = qubit + self.f_state = f_state + if auto: + self.run_analysis() -def get_shots_zero_one(data, post_select: bool=False, - nr_samples: int=2, sample_0: int=0, sample_1: int=1, - post_select_threshold: float = None): - if not post_select: - shots_0, shots_1 = a_tools.zigzag( - data, sample_0, sample_1, nr_samples) - else: - presel_0, presel_1 = a_tools.zigzag( - data, sample_0, sample_1, nr_samples) + def extract_data(self): + """ + This is a new style (sept 2019) data extraction. + This could at some point move to a higher level class. + """ + self.get_timestamps() + self.timestamp = self.timestamps[0] + data_fp = get_datafilepath_from_timestamp(self.timestamp) + param_spec = {'data': ('Experimental Data/Data', 'dset'), + 'value_names': ('Experimental Data', 'attr:value_names')} + self.raw_data_dict = h5d.extract_pars_from_datafile( + data_fp, param_spec) + # Parts added to be compatible with base analysis data requirements + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] - shots_0, shots_1 = a_tools.zigzag( - data, sample_0+1, sample_1+1, nr_samples) + def process_data(self): - if post_select: - post_select_shots_0 = data[0::nr_samples] - shots_0 = data[1::nr_samples] + if self.f_state: + _cycle = 12 + I0, Q0 = self.raw_data_dict['data'][:,1][9::_cycle], self.raw_data_dict['data'][:,2][9::_cycle] + I1, Q1 = self.raw_data_dict['data'][:,1][10::_cycle], self.raw_data_dict['data'][:,2][10::_cycle] + I2, Q2 = self.raw_data_dict['data'][:,1][11::_cycle], self.raw_data_dict['data'][:,2][11::_cycle] + else: + _cycle = 8 + I0, Q0 = self.raw_data_dict['data'][:,1][6::_cycle], self.raw_data_dict['data'][:,2][6::_cycle] + I1, Q1 = 
self.raw_data_dict['data'][:,1][7::_cycle], self.raw_data_dict['data'][:,2][7::_cycle] + # Measurement + IM1, QM1 = self.raw_data_dict['data'][0::_cycle,1], self.raw_data_dict['data'][0::_cycle,2] + IM2, QM2 = self.raw_data_dict['data'][1::_cycle,1], self.raw_data_dict['data'][1::_cycle,2] + IM3, QM3 = self.raw_data_dict['data'][2::_cycle,1], self.raw_data_dict['data'][2::_cycle,2] + IM4, QM4 = self.raw_data_dict['data'][3::_cycle,1], self.raw_data_dict['data'][3::_cycle,2] + IM5, QM5 = self.raw_data_dict['data'][4::_cycle,1], self.raw_data_dict['data'][4::_cycle,2] + IM6, QM6 = self.raw_data_dict['data'][5::_cycle,1], self.raw_data_dict['data'][5::_cycle,2] + # Rotate data + center_0 = np.array([np.mean(I0), np.mean(Q0)]) + center_1 = np.array([np.mean(I1), np.mean(Q1)]) + if self.f_state: + IM7, QM7 = self.raw_data_dict['data'][6::_cycle,1], self.raw_data_dict['data'][6::_cycle,2] + IM8, QM8 = self.raw_data_dict['data'][7::_cycle,1], self.raw_data_dict['data'][7::_cycle,2] + IM9, QM9 = self.raw_data_dict['data'][8::_cycle,1], self.raw_data_dict['data'][8::_cycle,2] + center_2 = np.array([np.mean(I2), np.mean(Q2)]) + def rotate_and_center_data(I, Q, vec0, vec1): + vector = vec1-vec0 + angle = np.arctan(vector[1]/vector[0]) + rot_matrix = np.array([[ np.cos(-angle),-np.sin(-angle)], + [ np.sin(-angle), np.cos(-angle)]]) + # Subtract mean + proc = np.array((I-(vec0+vec1)[0]/2, Q-(vec0+vec1)[1]/2)) + # Rotate theta + proc = np.dot(rot_matrix, proc) + return proc + # proc cal points + I0_proc, Q0_proc = rotate_and_center_data(I0, Q0, center_0, center_1) + I1_proc, Q1_proc = rotate_and_center_data(I1, Q1, center_0, center_1) + # proc M + IM1_proc, QM1_proc = rotate_and_center_data(IM1, QM1, center_0, center_1) + IM2_proc, QM2_proc = rotate_and_center_data(IM2, QM2, center_0, center_1) + IM3_proc, QM3_proc = rotate_and_center_data(IM3, QM3, center_0, center_1) + IM4_proc, QM4_proc = rotate_and_center_data(IM4, QM4, center_0, center_1) + IM5_proc, QM5_proc = 
rotate_and_center_data(IM5, QM5, center_0, center_1) + IM6_proc, QM6_proc = rotate_and_center_data(IM6, QM6, center_0, center_1) + if np.mean(I0_proc) > np.mean(I1_proc): + I0_proc *= -1 + I1_proc *= -1 + IM1_proc *= -1 + IM2_proc *= -1 + IM3_proc *= -1 + IM4_proc *= -1 + IM5_proc *= -1 + IM6_proc *= -1 + # Calculate optimal threshold + ubins_A_0, ucounts_A_0 = np.unique(I0_proc, return_counts=True) + ubins_A_1, ucounts_A_1 = np.unique(I1_proc, return_counts=True) + ucumsum_A_0 = np.cumsum(ucounts_A_0) + ucumsum_A_1 = np.cumsum(ucounts_A_1) + # merge |0> and |1> shot bins + all_bins_A = np.unique(np.sort(np.concatenate((ubins_A_0, ubins_A_1)))) + # interpolate cumsum for all bins + int_cumsum_A_0 = np.interp(x=all_bins_A, xp=ubins_A_0, fp=ucumsum_A_0, left=0) + int_cumsum_A_1 = np.interp(x=all_bins_A, xp=ubins_A_1, fp=ucumsum_A_1, left=0) + norm_cumsum_A_0 = int_cumsum_A_0/np.max(int_cumsum_A_0) + norm_cumsum_A_1 = int_cumsum_A_1/np.max(int_cumsum_A_1) + # Calculating threshold + F_vs_th = (1-(1-abs(norm_cumsum_A_0-norm_cumsum_A_1))/2) + opt_idxs = np.argwhere(F_vs_th == np.amax(F_vs_th)) + opt_idx = int(round(np.average(opt_idxs))) + threshold = all_bins_A[opt_idx] + # fidlity calculation from cal point + P0_dig = np.array([ 0 if s .5: + ax.text(i, j, '{:.2f}'.format(c), va='center', ha='center', + color = 'white') + else: + ax.text(i, j, '{:.2f}'.format(c), va='center', ha='center') + ax.set_xticks(np.arange(n)) + ax.set_xticklabels([f'$|{i}\\rangle$' for i in range(n)]) + ax.set_xlabel('Output state') + ax.set_yticks(np.arange(n)) + ax.set_yticklabels([f'$|{i}\\rangle$' for i in range(n)]) + ax.set_ylabel('Input state') + name = qubit + if n==3: + name = 'Qutrit' + elif n==4: + name = 'Ququat' + ax.set_title(f'{timestamp}\n{name} QNDness matrix qubit {qubit}') + cbar_ax = fig.add_axes([.95, .15, .03, .7]) + cb = fig.colorbar(im, cax=cbar_ax) + cb.set_label('transfer probability') + + +class Depletion_AllXY_analysis(ba.BaseDataAnalysis): + """ + """ + def 
__init__(self, + qubit, + t_start: str = None, + t_stop: str = None, + label: str = '', + options_dict: dict = None, + extract_only: bool = False, + auto=True + ): + super().__init__(t_start=t_start, t_stop=t_stop, + label=label, + options_dict=options_dict, + extract_only=extract_only) + self.qubit = qubit + if auto: + self.run_analysis() - return shots_0, shots_1 + def extract_data(self): + self.get_timestamps() + self.timestamp = self.timestamps[0] + data_fp = get_datafilepath_from_timestamp(self.timestamp) + param_spec = {'data': ('Experimental Data/Data', 'dset'), + 'value_names': ('Experimental Data', 'attr:value_names')} + self.raw_data_dict = h5d.extract_pars_from_datafile( + data_fp, param_spec) + # Parts added to be compatible with base analysis data requirements + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] + def process_data(self): + _cycle = 6 + data_0 = self.raw_data_dict['data'][:,1][0::_cycle] + data_1 = self.raw_data_dict['data'][:,1][2::_cycle] + data_2 = self.raw_data_dict['data'][:,1][3::_cycle] + data_3 = self.raw_data_dict['data'][:,1][5::_cycle] + zero_lvl = np.mean(data_0[:2]) + one_lvl = np.mean(data_0[-2:]) + data_0 = (data_0 - zero_lvl)/(one_lvl-zero_lvl) + data_1 = (data_1 - zero_lvl)/(one_lvl-zero_lvl) + data_2 = (data_2 - zero_lvl)/(one_lvl-zero_lvl) + data_3 = (data_3 - zero_lvl)/(one_lvl-zero_lvl) + self.proc_data_dict['data_0'] = data_0 + self.proc_data_dict['data_1'] = data_1 + self.proc_data_dict['data_2'] = data_2 + self.proc_data_dict['data_3'] = data_3 + + def prepare_plots(self): + self.axs_dict = {} + fig, axs = plt.subplots(figsize=(12,4), ncols=2) + axs = axs.flatten() + self.figs['main'] = fig + self.axs_dict['main'] = axs[0] + self.plot_dicts['main'] = { + 'plotfn': plot_depletion_allxy, + 'ax_id': 'main', + 'data_0': self.proc_data_dict['data_0'], + 'data_1': self.proc_data_dict['data_1'], + 'data_2': self.proc_data_dict['data_2'], + 'data_3': 
self.proc_data_dict['data_3'], + 'qubit': self.qubit, + 'timestamp': self.timestamp + } -def get_arb_comb_xx_label(nr_of_qubits, qubit_idx: int): + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', False): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + tag_tstamp=self.options_dict.get('tag_tstamp', True)) + +def plot_depletion_allxy(qubit, timestamp, + data_0, data_1, + data_2, data_3, + ax, **kw): + fig = ax.get_figure() + axs = fig.get_axes() + + allXY = ['II', 'XX', 'YY', 'XY', 'YX', 'xI', 'yI', + 'xy', 'yx', 'xY', 'yX', 'Xy', 'Yx', 'xX', + 'Xx', 'yY', 'Yy', 'XI', 'YI', 'xx', 'yy'] + + ideal = [0 for i in range(10)] + [.5 for i in range(24)] + [1 for i in range(8)] + + axs[0].set_xticks(np.arange(0, 42, 2)+.5) + axs[0].set_xticklabels(allXY) + axs[0].set_ylabel(r'P($|1\rangle$)') + axs[0].plot(ideal, 'k--', lw=1, label='ideal') + axs[0].plot(data_0, 'C0o-', alpha=1, label='Standard sequence') + axs[0].plot(data_1, 'C1.-', alpha=.75, label='post-measurement') + axs[0].legend(loc=0) + axs[0].set_title(r'Qubit initialized in $|0\rangle$') + + axs[1].set_xticks(np.arange(0, 42, 2)+.5) + axs[1].set_xticklabels(allXY) + axs[1].set_ylabel(r'P($|1\rangle$)') + axs[1].plot(1-np.array(ideal), 'k--', lw=1, label='ideal') + axs[1].plot(data_2, 'C0o-', alpha=1, label='Standard sequence') + axs[1].plot(data_3, 'C1.-', alpha=.75, label='post-measurement') + axs[1].legend(loc=0) + axs[1].set_title(r'Qubit initialized in $|1\rangle$') + + fig.suptitle(timestamp+'\nDepletion_ALLXY_'+qubit, y=1.0) + + +def _rotate_and_center_data(I, Q, vec0, vec1, phi=0): + ''' + Rotate , shots in IQ plane around axis defined by - + ''' + vector = vec1-vec0 + angle = np.arctan(vector[1]/vector[0]) + rot_matrix = np.array([[ np.cos(-angle+phi),-np.sin(-angle+phi)], + [ np.sin(-angle+phi), np.cos(-angle+phi)]]) + proc = np.array((I, 
Q)) + proc = np.dot(rot_matrix, proc) + return proc.transpose() + +def _Classify_qubit_calibration_shots(Shots_0, Shots_1): + ''' + Train linear discriminant classifier + to classify Qubit shots in IQ space. + ''' + data = np.concatenate((Shots_0, Shots_1)) + labels = [0 for s in Shots_0]+[1 for s in Shots_1] + from sklearn.discriminant_analysis import LinearDiscriminantAnalysis + clf = LinearDiscriminantAnalysis() + clf.fit(data, labels) + dec_bounds = _decision_boundary_points(clf.coef_, clf.intercept_) + Fid_dict = {} + for state, shots in zip([ '0', '1'], + [Shots_0, Shots_1]): + _res = clf.predict(shots) + _fid = np.mean(_res == int(state)) + Fid_dict[state] = _fid + Fid_dict['avg'] = np.mean([f for f in Fid_dict.values()]) + # Get assignment fidelity matrix + M = np.zeros((2,2)) + for i, shots in enumerate([Shots_0, Shots_1]): + for j, state in enumerate(['0', '1']): + _res = clf.predict(shots) + M[i][j] = np.mean(_res == int(state)) + return clf, Fid_dict, M, dec_bounds + +def _Classify_qutrit_calibration_shots(Shots_0, Shots_1, Shots_2): + ''' + Train linear discriminant classifier + to classify Qutrit shots in IQ space. 
+ ''' + data = np.concatenate((Shots_0, Shots_1, Shots_2)) + labels = [0 for s in Shots_0]+[1 for s in Shots_1]+[2 for s in Shots_2] + from sklearn.discriminant_analysis import LinearDiscriminantAnalysis + clf = LinearDiscriminantAnalysis() + clf.fit(data, labels) + dec_bounds = _decision_boundary_points(clf.coef_, clf.intercept_) + Fid_dict = {} + for state, shots in zip([ '0', '1', '2'], + [Shots_0, Shots_1, Shots_2]): + _res = clf.predict(shots) + _fid = np.mean(_res == int(state)) + Fid_dict[state] = _fid + Fid_dict['avg'] = np.mean([f for f in Fid_dict.values()]) + # Get assignment fidelity matrix + M = np.zeros((3,3)) + for i, shots in enumerate([Shots_0, Shots_1, Shots_2]): + for j, state in enumerate(['0', '1', '2']): + _res = clf.predict(shots) + M[i][j] = np.mean(_res == int(state)) + return clf, Fid_dict, M, dec_bounds + +def calc_assignment_prob_matrix(combinations, digitized_data): + first_key = next(iter(digitized_data)) + n_shots = len(digitized_data[first_key][next(iter(digitized_data[first_key]))]) + # set up empty assignment matrix + assignment_prob_matrix = np.zeros((len(combinations), len(combinations))) + # Loop through different input and assigned states + for i, input_state in enumerate(combinations): + for j, outcome in enumerate(combinations): + Check = np.ones(n_shots) + # loop through different qubits + for k, ch in enumerate(digitized_data.keys()): + # Check if measured state matches the current assigned state + check = digitized_data[ch][input_state] == int(outcome[k]) + Check *= check + assignment_prob_matrix[i][j] = sum(Check)/n_shots + return assignment_prob_matrix + +def calc_cross_fidelity_matrix(combinations, assignment_prob_matrix): + n = int(np.log2(len(combinations))) + C = np.zeros((n, n)) + for i in range(n): + for j in range(n): + # idxs where qubit i is was assigned in ground or excited state + combs_gi = [ idx for idx, comb in enumerate(combinations) if comb[i]=='0' ] + combs_ei = [ idx for idx, comb in 
enumerate(combinations) if comb[i]=='1' ] + # idxs where qubit j is was prepared in ground or excited

(pi-pulsed) + combs_Ij = [ idx for idx, comb in enumerate(combinations) if comb[j]=='0' ] + combs_Pj = [ idx for idx, comb in enumerate(combinations) if comb[j]=='1' ]# calculate conditional probabilities + # calculate conditional probabilities + P_ei_Ij = np.sum(assignment_prob_matrix[combs_ei][:,combs_Ij]) + P_gi_Pj = np.sum(assignment_prob_matrix[combs_gi][:,combs_Pj]) + # Normalize probabilities + normalization_factor = (len(combinations)/2) + P_ei_Ij = P_ei_Ij/normalization_factor + P_gi_Pj = P_gi_Pj/normalization_factor + # Add entry to cross fidelity matrix + Fc = 1 - P_ei_Ij - P_gi_Pj + C[j,i] = Fc + return C + +def _calculate_fid_and_threshold(x0, n0, x1, n1): """ - Returns labels of the form "xx0xxx", "xx1xxx", "xx2xxx" - Length of the label is equal to the number of qubits + Calculate fidelity and threshold from histogram data: + x0, n0 is the histogram data of shots 0 (value and occurences), + x1, n1 is the histogram data of shots 1 (value and occurences). """ - comb_str_0 = list('x'*nr_of_qubits) - comb_str_0[-(qubit_idx+1)] = '0' - comb_str_0 = "".join(comb_str_0) - - comb_str_1 = list('x'*nr_of_qubits) - comb_str_1[-(qubit_idx+1)] = '1' - comb_str_1 = "".join(comb_str_1) + # Build cumulative histograms of shots 0 + # and 1 in common bins by interpolation. 
+ all_x = np.unique(np.sort(np.concatenate((x0, x1)))) + cumsum0, cumsum1 = np.cumsum(n0), np.cumsum(n1) + ecumsum0 = np.interp(x=all_x, xp=x0, fp=cumsum0, left=0) + necumsum0 = ecumsum0/np.max(ecumsum0) + ecumsum1 = np.interp(x=all_x, xp=x1, fp=cumsum1, left=0) + necumsum1 = ecumsum1/np.max(ecumsum1) + # Calculate optimal threshold and fidelity + F_vs_th = (1-(1-abs(necumsum0 - necumsum1))/2) + opt_idxs = np.argwhere(F_vs_th == np.amax(F_vs_th)) + opt_idx = int(round(np.average(opt_idxs))) + F_assignment_raw = F_vs_th[opt_idx] + threshold_raw = all_x[opt_idx] + return F_assignment_raw, threshold_raw - comb_str_2 = list('x'*nr_of_qubits) - comb_str_2[-(qubit_idx+1)] = '2' - comb_str_2 = "".join(comb_str_2) +def _gauss_pdf(x, x0, sigma): + return np.exp(-((x-x0)/sigma)**2/2) - return comb_str_0, comb_str_1, comb_str_2 +def double_gauss(x, x0, x1, sigma0, sigma1, A, r): + _dist0 = A*( (1-r)*_gauss_pdf(x, x0, sigma0) + r*_gauss_pdf(x, x1, sigma1) ) + return _dist0 +def _double_gauss_joint(x, x0, x1, sigma0, sigma1, A0, A1, r0, r1): + _dist0 = double_gauss(x, x0, x1, sigma0, sigma1, A0, r0) + _dist1 = double_gauss(x, x1, x0, sigma1, sigma0, A1, r1) + return np.concatenate((_dist0, _dist1)) -def get_assignement_fid_from_cumhist(chist_0, chist_1, bin_centers=None): +def _fit_double_gauss(x_vals, hist_0, hist_1): + ''' + Fit two histograms to a double gaussian with + common parameters. From fitted parameters, + calculate SNR, Pe0, Pg1, Teff, Ffit and Fdiscr. 
+ ''' + from scipy.optimize import curve_fit + # Double gaussian model for fitting + def _gauss_pdf(x, x0, sigma): + return np.exp(-((x-x0)/sigma)**2/2) + global double_gauss + def double_gauss(x, x0, x1, sigma0, sigma1, A, r): + _dist0 = A*( (1-r)*_gauss_pdf(x, x0, sigma0) + r*_gauss_pdf(x, x1, sigma1) ) + return _dist0 + # helper function to simultaneously fit both histograms with common parameters + def _double_gauss_joint(x, x0, x1, sigma0, sigma1, A0, A1, r0, r1): + _dist0 = double_gauss(x, x0, x1, sigma0, sigma1, A0, r0) + _dist1 = double_gauss(x, x1, x0, sigma1, sigma0, A1, r1) + return np.concatenate((_dist0, _dist1)) + # Guess for fit + pdf_0 = hist_0/np.sum(hist_0) # Get prob. distribution + pdf_1 = hist_1/np.sum(hist_1) # + _x0_guess = np.sum(x_vals*pdf_0) # calculate mean + _x1_guess = np.sum(x_vals*pdf_1) # + _sigma0_guess = np.sqrt(np.sum((x_vals-_x0_guess)**2*pdf_0)) # calculate std + _sigma1_guess = np.sqrt(np.sum((x_vals-_x1_guess)**2*pdf_1)) # + _r0_guess = 0.01 + _r1_guess = 0.05 + _A0_guess = np.max(hist_0) + _A1_guess = np.max(hist_1) + p0 = [_x0_guess, _x1_guess, _sigma0_guess, _sigma1_guess, _A0_guess, _A1_guess, _r0_guess, _r1_guess] + # Bounding parameters + _x0_bound = (-np.inf,np.inf) + _x1_bound = (-np.inf,np.inf) + _sigma0_bound = (0,np.inf) + _sigma1_bound = (0,np.inf) + _r0_bound = (0,1) + _r1_bound = (0,1) + _A0_bound = (0,np.inf) + _A1_bound = (0,np.inf) + bounds = np.array([_x0_bound, _x1_bound, _sigma0_bound, _sigma1_bound, + _A0_bound, _A1_bound, _r0_bound, _r1_bound]) + # Fit parameters within bounds + popt, pcov = curve_fit( + _double_gauss_joint, x_vals, + np.concatenate((hist_0, hist_1)), + p0=p0, bounds=bounds.transpose()) + popt0 = popt[[0,1,2,3,4,6]] + popt1 = popt[[1,0,3,2,5,7]] + # Calculate quantities of interest + SNR = abs(popt0[0] - popt1[0])/((abs(popt0[2])+abs(popt1[2]))/2) + P_e0 = popt0[5] + P_g1 = popt1[5] + # Fidelity from fit + _range = (np.min(x_vals), np.max(x_vals)) + _x_data = np.linspace(*_range, 10001) + 
_h0 = double_gauss(_x_data, *popt0)# compute distrubition from + _h1 = double_gauss(_x_data, *popt1)# fitted parameters. + Fid_fit, threshold_fit = _calculate_fid_and_threshold(_x_data, _h0, _x_data, _h1) + # Discrimination fidelity + _h0 = double_gauss(_x_data, *popt0[:-1], 0)# compute distrubition without residual + _h1 = double_gauss(_x_data, *popt1[:-1], 0)# excitation of relaxation. + Fid_discr, threshold_discr = _calculate_fid_and_threshold(_x_data, _h0, _x_data, _h1) + # return results + qoi = { 'SNR': SNR, + 'P_e0': P_e0, 'P_g1': P_g1, + 'Fid_fit': Fid_fit, 'Fid_discr': Fid_discr } + return popt0, popt1, qoi + +def _Analyse_qubit_shots_along_decision_boundaries( + qubit, + Shots_0, Shots_1, + dec_bounds, proc_data_dict): + ''' + Project readout data along axis perpendicular + to the decision boundaries of classifier and computes + quantities of interest. These are saved in the . + ''' + ############################ + # Projection along 01 axis. + ############################ + # Rotate shots over 01 axis + shots_0 = _rotate_and_center_data(Shots_0[:,0],Shots_0[:,1],dec_bounds['left'],dec_bounds['right'],phi=np.pi/2) + shots_1 = _rotate_and_center_data(Shots_1[:,0],Shots_1[:,1],dec_bounds['left'],dec_bounds['right'],phi=np.pi/2) + # Take relavant quadrature + shots_0 = shots_0[:,0] + shots_1 = shots_1[:,0] + n_shots_1 = len(shots_1) + # find range + _all_shots = np.concatenate((shots_0, shots_1)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x0, n0 = np.unique(shots_0, return_counts=True) + x1, n1 = np.unique(shots_1, return_counts=True) + Fid_01, threshold_01 = _calculate_fid_and_threshold(x0, n0, x1, n1) + # Histogram of shots for 1 and 2 + h0, bin_edges = np.histogram(shots_0, bins=100, range=_range) + h1, bin_edges = np.histogram(shots_1, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt0, popt1, params_01 = _fit_double_gauss(bin_centers, h0, h1) + # Save processed data + 
proc_data_dict[qubit]['projection_qubit'] = {} + proc_data_dict[qubit]['projection_qubit']['h0'] = h0 + proc_data_dict[qubit]['projection_qubit']['h1'] = h1 + proc_data_dict[qubit]['projection_qubit']['bin_centers'] = bin_centers + proc_data_dict[qubit]['projection_qubit']['popt0'] = popt0 + proc_data_dict[qubit]['projection_qubit']['popt1'] = popt1 + proc_data_dict[qubit]['projection_qubit']['SNR'] = params_01['SNR'] + proc_data_dict[qubit]['projection_qubit']['Fid'] = Fid_01 + proc_data_dict[qubit]['projection_qubit']['threshold'] = threshold_01 + +def _Analyse_qutrit_shots_along_decision_boundaries( + qubit, + Shots_0, Shots_1, Shots_2, + dec_bounds, proc_data_dict): + ''' + Project readout data along axis perpendicular + to the decision boundaries of classifier and computes + quantities of interest. These are saved in the . + ''' + ############################ + # Projection along 01 axis. + ############################ + # Rotate shots over 01 axis + shots_0 = _rotate_and_center_data(Shots_0[:,0],Shots_0[:,1],dec_bounds['mean'],dec_bounds['01'],phi=np.pi/2) + shots_1 = _rotate_and_center_data(Shots_1[:,0],Shots_1[:,1],dec_bounds['mean'],dec_bounds['01'],phi=np.pi/2) + # Take relavant quadrature + shots_0 = shots_0[:,0] + shots_1 = shots_1[:,0] + n_shots_1 = len(shots_1) + # find range + _all_shots = np.concatenate((shots_0, shots_1)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x0, n0 = np.unique(shots_0, return_counts=True) + x1, n1 = np.unique(shots_1, return_counts=True) + Fid_01, threshold_01 = _calculate_fid_and_threshold(x0, n0, x1, n1) + # Histogram of shots for 1 and 2 + h0, bin_edges = np.histogram(shots_0, bins=100, range=_range) + h1, bin_edges = np.histogram(shots_1, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt0, popt1, params_01 = _fit_double_gauss(bin_centers, h0, h1) + # Save processed data + proc_data_dict[qubit]['projection_01'] = {} + 
proc_data_dict[qubit]['projection_01']['h0'] = h0 + proc_data_dict[qubit]['projection_01']['h1'] = h1 + proc_data_dict[qubit]['projection_01']['bin_centers'] = bin_centers + proc_data_dict[qubit]['projection_01']['popt0'] = popt0 + proc_data_dict[qubit]['projection_01']['popt1'] = popt1 + proc_data_dict[qubit]['projection_01']['SNR'] = params_01['SNR'] + proc_data_dict[qubit]['projection_01']['Fid'] = Fid_01 + proc_data_dict[qubit]['projection_01']['threshold'] = threshold_01 + ############################ + # Projection along 12 axis. + ############################ + # Rotate shots over 12 axis + shots_1 = _rotate_and_center_data(Shots_1[:,0],Shots_1[:,1],dec_bounds['mean'], dec_bounds['12'], phi=np.pi/2) + shots_2 = _rotate_and_center_data(Shots_2[:,0],Shots_2[:,1],dec_bounds['mean'], dec_bounds['12'], phi=np.pi/2) + # Take relavant quadrature + shots_1 = shots_1[:,0] + shots_2 = shots_2[:,0] + n_shots_2 = len(shots_2) + # find range + _all_shots = np.concatenate((shots_1, shots_2)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x1, n1 = np.unique(shots_1, return_counts=True) + x2, n2 = np.unique(shots_2, return_counts=True) + Fid_12, threshold_12 = _calculate_fid_and_threshold(x1, n1, x2, n2) + # Histogram of shots for 1 and 2 + h1, bin_edges = np.histogram(shots_1, bins=100, range=_range) + h2, bin_edges = np.histogram(shots_2, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt1, popt2, params_12 = _fit_double_gauss(bin_centers, h1, h2) + # Save processed data + proc_data_dict[qubit]['projection_12'] = {} + proc_data_dict[qubit]['projection_12']['h1'] = h1 + proc_data_dict[qubit]['projection_12']['h2'] = h2 + proc_data_dict[qubit]['projection_12']['bin_centers'] = bin_centers + proc_data_dict[qubit]['projection_12']['popt1'] = popt1 + proc_data_dict[qubit]['projection_12']['popt2'] = popt2 + proc_data_dict[qubit]['projection_12']['SNR'] = params_12['SNR'] + 
proc_data_dict[qubit]['projection_12']['Fid'] = Fid_12 + proc_data_dict[qubit]['projection_12']['threshold'] = threshold_12 + ############################ + # Projection along 02 axis. + ############################ + # Rotate shots over 02 axis + shots_0 = _rotate_and_center_data(Shots_0[:,0],Shots_0[:,1],dec_bounds['mean'],dec_bounds['02'], phi=np.pi/2) + shots_2 = _rotate_and_center_data(Shots_2[:,0],Shots_2[:,1],dec_bounds['mean'],dec_bounds['02'], phi=np.pi/2) + # Take relavant quadrature + shots_0 = shots_0[:,0] + shots_2 = shots_2[:,0] + n_shots_2 = len(shots_2) + # find range + _all_shots = np.concatenate((shots_0, shots_2)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x0, n0 = np.unique(shots_0, return_counts=True) + x2, n2 = np.unique(shots_2, return_counts=True) + Fid_02, threshold_02 = _calculate_fid_and_threshold(x0, n0, x2, n2) + # Histogram of shots for 1 and 2 + h0, bin_edges = np.histogram(shots_0, bins=100, range=_range) + h2, bin_edges = np.histogram(shots_2, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt0, popt2, params_02 = _fit_double_gauss(bin_centers, h0, h2) + # Save processed data + proc_data_dict[qubit]['projection_02'] = {} + proc_data_dict[qubit]['projection_02']['h0'] = h0 + proc_data_dict[qubit]['projection_02']['h2'] = h2 + proc_data_dict[qubit]['projection_02']['bin_centers'] = bin_centers + proc_data_dict[qubit]['projection_02']['popt0'] = popt0 + proc_data_dict[qubit]['projection_02']['popt2'] = popt2 + proc_data_dict[qubit]['projection_02']['SNR'] = params_02['SNR'] + proc_data_dict[qubit]['projection_02']['Fid'] = Fid_02 + proc_data_dict[qubit]['projection_02']['threshold'] = threshold_02 + +class Multiplexed_Readout_Analysis(ba.BaseDataAnalysis): """ - Returns the average assignment fidelity and threshold - F_assignment_raw = (P01 - P10 )/2 - where Pxy equals probability to measure x when starting in y + Analysis for single-shot Multiplexed readout 
experiment. + This new analysis now supports post-selection + with two quadratures and 3 state readout. """ - F_vs_th = (1-(1-abs(chist_1 - chist_0))/2) - opt_idx = np.argmax(F_vs_th) - F_assignment_raw = F_vs_th[opt_idx] - - if bin_centers is None: - bin_centers = np.arange(len(chist_0)) - threshold = bin_centers[opt_idx] - - return F_assignment_raw, threshold + def __init__(self, + qubits: list, + heralded_init: bool, + f_state: bool = False, + t_start: str = None, + t_stop: str = None, + label: str = '', + options_dict: dict = None, + extract_only: bool = False, + auto=True + ): + super().__init__(t_start=t_start, t_stop=t_stop, + label=label, + options_dict=options_dict, + extract_only=extract_only) -def make_mux_ssro_histogram_combined(data_dict, ch_name, qubit_idx, - thresholds=None, threshold_labels=None, - title=None, ax=None, **kw): - if ax is None: - f, ax = plt.subplots() - markers = itertools.cycle(('v', '^', 'd')) - - comb_str_0, comb_str_1, comb_str_2 = get_arb_comb_xx_label( - data_dict['nr_of_qubits'], qubit_idx=qubit_idx) - - ax.plot(data_dict['bin_centers {}'.format(ch_name)], - data_dict['hist {} {}'.format(ch_name, comb_str_0)][0], - linestyle='', - marker=next(markers), alpha=.7, label=comb_str_0) - ax.plot(data_dict['bin_centers {}'.format(ch_name)], - data_dict['hist {} {}'.format(ch_name, comb_str_1)][0], - linestyle='', - marker=next(markers), alpha=.7, label=comb_str_1) - - if thresholds is not None: - # this is to support multiple threshold types such as raw, fitted etc. - th_styles = itertools.cycle(('--', '-.', '..')) - for threshold, label in zip(thresholds, threshold_labels): - ax.axvline(threshold, linestyle=next(th_styles), color='grey', - label=label) - - legend_title = "Prep. 
state [%s]" % ', '.join(data_dict['qubit_names']) - ax.legend(title=legend_title, loc=1) # top right corner - ax.set_ylabel('Counts') - # arbitrary units as we use optimal weights - set_xlabel(ax, ch_name, 'a.u.') - - if title is not None: - ax.set_title(title) + self.qubits = qubits + self.heralded_init = heralded_init + self.f_state = f_state + + if auto: + self.run_analysis() + def extract_data(self): + self.get_timestamps() + self.timestamp = self.timestamps[0] + data_fp = get_datafilepath_from_timestamp(self.timestamp) + param_spec = {'data': ('Experimental Data/Data', 'dset'), + 'value_names': ('Experimental Data', 'attr:value_names')} + self.raw_data_dict = h5d.extract_pars_from_datafile( + data_fp, param_spec) + # Parts added to be compatible with base analysis data requirements + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] -def make_mux_ssro_histogram(data_dict, ch_name, title=None, ax=None, **kw): - if ax is None: - f, ax = plt.subplots() - nr_of_qubits = data_dict['nr_of_qubits'] - markers = itertools.cycle(('v', '<', '>', '^', 'd', 'o', 's', '*')) - for i in range(2**nr_of_qubits): - format_str = '{'+'0:0{}b'.format(nr_of_qubits) + '}' - binning_string = format_str.format(i) - ax.plot(data_dict['bin_centers {}'.format(ch_name)], - data_dict['hist {} {}'.format(ch_name, binning_string)][0], - linestyle='', - marker=next(markers), alpha=.7, label=binning_string) - - legend_title = "Prep. 
state \n[%s]" % ', '.join(data_dict['qubit_names']) - ax.legend(title=legend_title, loc=1) - ax.set_ylabel('Counts') - # arbitrary units as we use optimal weights - set_xlabel(ax, ch_name, 'a.u.') - - if title is not None: - ax.set_title(title) + def process_data(self): + n_qubits = len(self.qubits) + _cycle = 2**n_qubits + states = ['0', '1'] + if self.f_state: + _cycle = 3**n_qubits + states = ['0', '1', '2'] + if self.heralded_init: + _cycle *= 2 + combinations = [''.join(s) for s in itertools.product(states, repeat=n_qubits)] + self.combinations = combinations + # Sort acquisition channels + _channels = [ name.decode() for name in self.raw_data_dict['value_names'] ] + acq_channel_dict = { q : (None, None) for q in self.qubits } + for q in self.qubits: + _channel_I = [i for i, s in enumerate(_channels) if f'{q} I' in s] + _channel_Q = [i for i, s in enumerate(_channels) if f'{q} Q' in s] + assert len(_channel_I) == 1 + assert len(_channel_Q) == 1 + acq_channel_dict[q] = (_channel_I[0]+1, _channel_Q[0]+1) + # Sort qubit shots per state + raw_shots = {q:None for q in self.qubits} + self.qoi = {} + for q_idx, q in enumerate(self.qubits): + raw_shots[q] = self.raw_data_dict['data'][:,acq_channel_dict[q]] + self.proc_data_dict[q] = {} + self.proc_data_dict[q]['shots_0_IQ'] = [] + self.proc_data_dict[q]['shots_1_IQ'] = [] + if self.f_state: + self.proc_data_dict[q]['shots_2_IQ'] = [] + for i, comb in enumerate(combinations): + _idx = (1+self.heralded_init)*i+self.heralded_init + if comb[q_idx] == '0': + self.proc_data_dict[q]['shots_0_IQ'] += list(raw_shots[q][_idx::_cycle]) + elif comb[q_idx] == '1': + self.proc_data_dict[q]['shots_1_IQ'] += list(raw_shots[q][_idx::_cycle]) + elif (comb[q_idx] == '2') and self.f_state: + self.proc_data_dict[q]['shots_2_IQ'] += list(raw_shots[q][_idx::_cycle]) + # Convert list into array + self.proc_data_dict[q]['shots_0_IQ'] = np.array(self.proc_data_dict[q]['shots_0_IQ']) + self.proc_data_dict[q]['shots_1_IQ'] = 
np.array(self.proc_data_dict[q]['shots_1_IQ']) + if self.f_state: + self.proc_data_dict[q]['shots_2_IQ'] = np.array(self.proc_data_dict[q]['shots_2_IQ']) + ##################################################### + # From this point onward raw shots has shape + # (nr_shots, nr_quadratures). + # Post select based on heralding measurement result. + ##################################################### + if self.heralded_init: + # pass # Not implemented yet + # Post-selection mask + _mask = np.ones(len(raw_shots[self.qubits[0]][0::2])) + for q_idx, q in enumerate(self.qubits): + # Train classifier for qubit + Shots_0 = self.proc_data_dict[q]['shots_0_IQ'] # Parse data for + Shots_1 = self.proc_data_dict[q]['shots_1_IQ'] # classifier + clf, _, _, _ = _Classify_qubit_calibration_shots(Shots_0, Shots_1) + # Sort heralding measurement shots + _ps_shots = raw_shots[q][0::2] + _ps_shots_dig = clf.predict(_ps_shots) + _mask *= np.array([1 if s == 0 else np.nan for s in _ps_shots_dig]) + # Remove heralding measurement shots + for q_idx, q in enumerate(self.qubits): + raw_shots[q] = raw_shots[q][1::2] + else: + _mask = np.ones(len(raw_shots[self.qubits[0]])) + + # Sort shots after handling post-selection + for q_idx, q in enumerate(self.qubits): + # Rotate data along 01 + center_0 = np.mean(self.proc_data_dict[q]['shots_0_IQ'], axis=0) + center_1 = np.mean(self.proc_data_dict[q]['shots_1_IQ'], axis=0) + raw_shots[q] = _rotate_and_center_data( + raw_shots[q][:,0], raw_shots[q][:,1], center_0, center_1) + self.proc_data_dict[q]['Shots_0'] = [] + self.proc_data_dict[q]['Shots_1'] = [] + if self.f_state: + self.proc_data_dict[q]['Shots_2'] = [] + for i, comb in enumerate(combinations): + self.proc_data_dict[q][f'shots_{comb}'] = raw_shots[q][i::_cycle] + if comb[q_idx] == '0': + self.proc_data_dict[q]['Shots_0'] += list(raw_shots[q][i::_cycle][~np.isnan(_mask[i::_cycle])]) + elif comb[q_idx] == '1': + self.proc_data_dict[q]['Shots_1'] += 
list(raw_shots[q][i::_cycle][~np.isnan(_mask[i::_cycle])]) + elif (comb[q_idx] == '2') and self.f_state: + self.proc_data_dict[q]['Shots_2'] += list(raw_shots[q][i::_cycle][~np.isnan(_mask[i::_cycle])]) + # Convert list into array + self.proc_data_dict[q]['Shots_0'] = np.array(self.proc_data_dict[q]['Shots_0']) + self.proc_data_dict[q]['Shots_1'] = np.array(self.proc_data_dict[q]['Shots_1']) + if self.f_state: + self.proc_data_dict[q]['Shots_2'] = np.array(self.proc_data_dict[q]['Shots_2']) + ################################################################## + # From this point onward Shots_ contains post-selected + # shots of state and has shape (nr_ps_shots, nr_quadtrs). + # Next we will analyze shots projected along axis and + # therefore use a single quadrature. shots_ will be used + # to denote that array of shots. + ################################################################## + for q_idx, q in enumerate(self.qubits): + ############################################################## + # Analyse data in quadrature of interest + # (01 projection axis) + ############################################################## + shots_0 = self.proc_data_dict[q]['Shots_0'][:,0] + shots_1 = self.proc_data_dict[q]['Shots_1'][:,0] + # total number of shots (after postselection) + n_shots_0 = len(shots_0) + n_shots_1 = len(shots_1) + # find range + _all_shots = np.concatenate((shots_0, shots_1)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x0, n0 = np.unique(shots_0, return_counts=True) + x1, n1 = np.unique(shots_1, return_counts=True) + # Calculate fidelity and optimal threshold + Fid_raw, threshold_raw = _calculate_fid_and_threshold(x0, n0, x1, n1) + ###################### + # Fit data + ###################### + # Histogram of shots for 0 and 1 + h0, bin_edges = np.histogram(shots_0, bins=100, range=_range) + h1, bin_edges = np.histogram(shots_1, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt0, 
popt1, params_01 = _fit_double_gauss(bin_centers, h0, h1) + # Save data in processed data dictionary + self.proc_data_dict[q]['n_shots_0'] = n_shots_0 + self.proc_data_dict[q]['n_shots_1'] = n_shots_1 + self.proc_data_dict[q]['bin_centers'] = bin_centers + self.proc_data_dict[q]['h0'] = h0 + self.proc_data_dict[q]['h1'] = h1 + self.proc_data_dict[q]['popt0'] = popt0 + self.proc_data_dict[q]['popt1'] = popt1 + self.proc_data_dict[q]['threshold_raw'] = threshold_raw + self.proc_data_dict[q]['F_assignment_raw'] = Fid_raw + self.proc_data_dict[q]['F_fit'] = params_01['Fid_fit'] + self.proc_data_dict[q]['F_discr'] = params_01['Fid_discr'] + self.proc_data_dict[q]['residual_excitation'] = params_01['P_e0'] + self.proc_data_dict[q]['relaxation_events'] = params_01['P_g1'] + # self.proc_data_dict[q]['effective_temperature'] = params_01['T_eff'] + # Save quantities of interest + self.qoi[q] = {} + self.qoi[q]['SNR'] = params_01['SNR'] + self.qoi[q]['F_a'] = Fid_raw + self.qoi[q]['F_d'] = params_01['Fid_discr'] + ############################################ + # If second state data is use classifier + # to assign states in the IQ plane and + # calculate qutrit fidelity. 
+ ############################################ + if not self.f_state: + # Parse data for classifier + Shots_0 = self.proc_data_dict[q]['Shots_0'] + Shots_1 = self.proc_data_dict[q]['Shots_1'] + clf, Fid_dict, M, dec_bounds = _Classify_qubit_calibration_shots(Shots_0, Shots_1) + # Save outcome + self.proc_data_dict[q]['classifier'] = clf + self.proc_data_dict[q]['dec_bounds'] = dec_bounds + self.proc_data_dict[q]['Fid_dict'] = Fid_dict + self.qoi[q]['Fid_dict'] = Fid_dict + self.qoi[q]['Assignment_matrix'] = M + # Analyze shots + _Analyse_qubit_shots_along_decision_boundaries( + qubit=q, + Shots_0=Shots_0, Shots_1=Shots_1, + dec_bounds=dec_bounds, + proc_data_dict=self.proc_data_dict) + else: + # Parse data for classifier + Shots_0 = self.proc_data_dict[q]['Shots_0'] + Shots_1 = self.proc_data_dict[q]['Shots_1'] + Shots_2 = self.proc_data_dict[q]['Shots_2'] + clf, Fid_dict, M, dec_bounds = _Classify_qutrit_calibration_shots(Shots_0, Shots_1, Shots_2) + # Save outcome + self.proc_data_dict[q]['classifier'] = clf + self.proc_data_dict[q]['dec_bounds'] = dec_bounds + self.proc_data_dict[q]['Fid_dict'] = Fid_dict + self.qoi[q]['Fid_dict'] = Fid_dict + self.qoi[q]['Assignment_matrix'] = M + # Analyze shots + _Analyse_qutrit_shots_along_decision_boundaries( + qubit=q, + Shots_0=Shots_0, Shots_1=Shots_1, Shots_2=Shots_2, + dec_bounds=dec_bounds, + proc_data_dict=self.proc_data_dict) + + ############################################ + # Calculate Mux assignment fidelity matrix # + ############################################ + # Assign readout shots for each input state + _res = { q : {} for q in self.qubits} + for i, comb_i in enumerate(self.combinations): + # Assign shots for each qubit + for q in self.qubits: + _clf = self.proc_data_dict[q]['classifier'] + _res[q][comb_i] = np.array(_clf.predict(self.proc_data_dict[q][f'shots_{comb_i}']).astype(float)) + # mark post-selected shots + _res[q][comb_i] *= _mask[i::_cycle] + # _res now holds the outcome of shots for each qubit 
+ __res = [None for comb in self.combinations] + for i, comb in enumerate(self.combinations): + nr_shots_per_case = len(self.proc_data_dict[self.qubits[0]][f'shots_{comb}']) + # Convert to matrix of shape [qubit, shot] + _aux = np.vstack([_res[q][comb] for q in _res.keys() ]) + # Convert to list of multiplexed assigned outcomes + __res[i] = [ ''.join(_aux[:,k].astype(int).astype(str)) \ + if all(~np.isnan(_aux[:,k])) else np.nan \ + for k in range(nr_shots_per_case)] + # __res now holds the mux assigned outcome of shots (as strings) + if len(self.qubits) > 4: + # Print message as this might take a while... + print('Computing assignment fidelity matrix...') + # Compute assignment fidelity matrix + nr_shots_per_case = np.array([ np.sum(np.array(r)!='nan') for r in __res ]) + M = np.zeros((len(self.combinations), len(self.combinations))) + for j, assigned_state in enumerate(self.combinations): + M[:, j] = np.nansum(np.array(__res) == assigned_state, axis=1)/\ + nr_shots_per_case + print(j/len(self.combinations)) + self.proc_data_dict['Mux_assignment_matrix'] = M + ############################################ + # Calculate Mux cross-fidelity matrix # + ############################################ + if not self.f_state: + if len(self.qubits) > 4: + # Print message as this might take a while... 
+ print('Computing cross-fidelity matrix...') + C = calc_cross_fidelity_matrix(self.combinations, M) + self.proc_data_dict['Cross_fidelity_matrix'] = C + + print('done') + def prepare_plots(self): + self.axs_dict = {} + for q in self.qubits: + # Projected data histogram + fig, ax = plt.subplots(figsize=(5,4), dpi=100) + # fig.patch.set_alpha(0) + self.axs_dict[f'main_{q}'] = ax + self.figs[f'main_{q}'] = fig + self.plot_dicts[f'main_{q}'] = { + 'plotfn': ssro_hist_plotfn, + 'ax_id': f'main_{q}', + 'bin_centers': self.proc_data_dict[q]['bin_centers'], + 'h0': self.proc_data_dict[q]['h0'], + 'h1': self.proc_data_dict[q]['h1'], + 'popt0': self.proc_data_dict[q]['popt0'], + 'popt1': self.proc_data_dict[q]['popt1'], + 'threshold': self.proc_data_dict[q]['threshold_raw'], + 'Fid_raw': self.qoi[q]['F_a'], + 'Fid_fit': self.proc_data_dict[q]['F_fit'], + 'Fid_disc': self.qoi[q]['F_d'], + 'SNR': self.qoi[q]['SNR'], + 'P_e0': self.proc_data_dict[q]['residual_excitation'], + 'P_g1': self.proc_data_dict[q]['relaxation_events'], + 'n_shots_0': self.proc_data_dict[q]['n_shots_0'], + 'n_shots_1': self.proc_data_dict[q]['n_shots_1'], + 'T_eff': None, + 'qubit': q, + 'timestamp': self.timestamp + } + # IQ plane histogram + fig, ax = plt.subplots(figsize=(4,4), dpi=100) + # fig.patch.set_alpha(0) + self.axs_dict[f'main2_{q}'] = ax + self.figs[f'main2_{q}'] = fig + self.plot_dicts[f'main2_{q}'] = { + 'plotfn': ssro_IQ_plotfn, + 'ax_id': f'main2_{q}', + 'shots_0': self.proc_data_dict[q]['shots_0_IQ'], + 'shots_1': self.proc_data_dict[q]['shots_1_IQ'], + 'shots_2': self.proc_data_dict[q]['shots_2_IQ'] if self.f_state else None, + 'shots_3': None, + 'qubit': q, + 'timestamp': self.timestamp + } + if self.f_state: + # IQ plane histogram with classifier + fig = plt.figure(figsize=(8,4), dpi=100) + axs = [fig.add_subplot(121), + fig.add_subplot(322), + fig.add_subplot(324), + fig.add_subplot(326)] + # fig.patch.set_alpha(0) + self.axs_dict[f'main3_{q}'] = axs[0] + self.figs[f'main3_{q}'] = 
fig + self.plot_dicts[f'main3_{q}'] = { + 'plotfn': ssro_IQ_projection_plotfn, + 'ax_id': f'main3_{q}', + 'shots_0': self.proc_data_dict[q]['Shots_0'], + 'shots_1': self.proc_data_dict[q]['Shots_1'], + 'shots_2': self.proc_data_dict[q]['Shots_2'], + 'projection_01': self.proc_data_dict[q]['projection_01'], + 'projection_12': self.proc_data_dict[q]['projection_12'], + 'projection_02': self.proc_data_dict[q]['projection_02'], + 'classifier': self.proc_data_dict[q]['classifier'], + 'dec_bounds': self.proc_data_dict[q]['dec_bounds'], + 'Fid_dict': self.proc_data_dict[q]['Fid_dict'], + 'qubit': q, + 'timestamp': self.timestamp + } -def plot_2D_ssro_histogram(xvals, yvals, zvals, xlabel, xunit, ylabel, yunit, zlabel, zunit, - xlim=None, ylim=None, - title='', - cmap='viridis', - cbarwidth='10%', - cbarpad='5%', - no_label=False, - ax=None, cax=None, **kw): - if ax is None: - f, ax = plt.subplots() - if not no_label: - ax.set_title(title) + # Assignment fidelity matrix (only plot if qubits are less than 5) + fig, ax = plt.subplots(figsize=(8,8), dpi=1200) + # fig.patch.set_alpha(0) + self.axs_dict[f'Mux_assignment_matrix'] = ax + self.figs[f'Mux_assignment_matrix'] = fig + self.plot_dicts[f'Mux_assignment_matrix'] = { + 'plotfn': mux_assignment_matrix_plotfn, + 'ax_id': 'Mux_assignment_matrix', + 'M': self.proc_data_dict['Mux_assignment_matrix'], + 'Qubits': self.qubits, + 'combinations': self.combinations, + 'timestamp': self.timestamp + } + # Cross-fidelity matrix + figsize = np.array(np.shape(self.proc_data_dict['Cross_fidelity_matrix']))*1 + fig, ax = plt.subplots(figsize=figsize, dpi=100) + # fig.patch.set_alpha(0) + self.axs_dict[f'Cross_fidelity_matrix'] = ax + self.figs[f'Cross_fidelity_matrix'] = fig + self.plot_dicts[f'Cross_fidelity_matrix'] = { + 'plotfn': cross_fidelity_matrix_plotfn, + 'ax_id': 'Cross_fidelity_matrix', + 'C': self.proc_data_dict['Cross_fidelity_matrix'], + 'Qubits': self.qubits, + 'timestamp': self.timestamp + } - # Plotting the "heatmap" - 
out = flex_colormesh_plot_vs_xy(xvals, yvals, zvals, ax=ax, - plot_cbar=True, cmap=cmap) - # Adding the colorbar - if cax is None: - ax.ax_divider = make_axes_locatable(ax) - ax.cax = ax.ax_divider.append_axes( - 'right', size=cbarwidth, pad=cbarpad) + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', False): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + tag_tstamp=self.options_dict.get('tag_tstamp', True)) + +def mux_assignment_matrix_plotfn( + M, + Qubits, + timestamp, + combinations, + ax, **kw): + + fig = ax.get_figure() + # Plot diagonal terms + M_aux = np.diag(np.diag(M)) + vmin = min([np.min(np.diag(M)), .90]) + im1 = ax.matshow(M_aux*100, cmap='Blues', vmin=vmin*100, vmax=100) + # Plot off-diagonal terms + M_aux = M * (np.diag(np.ones(M.shape[0])*np.nan)+1) # make diagonal np.nan + vmax = max([np.nanmax(M_aux), .05]) + im = ax.matshow(M_aux*100, cmap='Reds', vmax=vmax*100, vmin=0) + # Write numbers + n = len(combinations) + if n < 2**6: + for i in range(n): + for j in range(n): + c = M[j,i] + # diagonal terms + if i==j: + if abs(c) > (1+vmin)/2: + ax.text(i, j, '{:.0f}'.format(c*100), va='center', ha='center', + color = 'white', size=8) + else: + ax.text(i, j, '{:.0f}'.format(c*100), va='center', ha='center', + size=8) + # off-diagonal terms + else: + if abs(c) > vmax/2: + ax.text(i, j, '{:.0f}'.format(c*100), va='center', ha='center', + color = 'white', size=8) + elif abs(c)>.01: + ax.text(i, j, '{:.0f}'.format(c*100), va='center', ha='center', + size=8) + ax.set_xticks(np.arange(n)) + ax.set_yticks(np.arange(n)) + _labels = [''.join([f'{comb[i]}' for i in range(len(Qubits))]) for comb in combinations] + ax.set_xticklabels([f'${label}$' for label in _labels], size=8, rotation=90) + ax.set_yticklabels([f'${label}$' for label in _labels], size=8) else: - ax.cax = cax - ax.cbar = 
plt.colorbar(out['cmap'], cax=ax.cax) - - # Setting axis limits aspect ratios and labels - ax.set_aspect(1) - set_xlabel(ax, xlabel, xunit) - set_ylabel(ax, ylabel, yunit) - set_cbarlabel(ax.cbar, zlabel, zunit) - if xlim is None: - xlim = np.min([xvals, yvals]), np.max([xvals, yvals]) - ax.set_xlim(xlim) - if ylim is None: - ylim = np.min([xvals, yvals]), np.max([xvals, yvals]) - ax.set_ylim(ylim) + _list = list(np.arange(n)[::n//8])+[n-1] + _combinations = combinations[::n//8] + combinations[-1:] + ax.set_xticks(_list) + ax.set_yticks(_list) + _labels = [''.join([f'{comb[i]}' for i in range(len(Qubits))]) for comb in _combinations] + ax.set_xticklabels([f'${label}$' for label in _labels], size=8, rotation=90) + ax.set_yticklabels([f'${label}$' for label in _labels], size=8) + ax.tick_params(axis="x", bottom=True, labelbottom=True, top=False, labeltop=False) + ax.set_xlabel(f'Assigned state $<'+''.join([f'S_\mathrm{{{Qubits[i]}}}' for i in range(len(Qubits))])+'>$') + ax.set_ylabel('Input state $<'+''.join([f'S_\mathrm{{{Qubits[i]}}}' for i in range(len(Qubits))])+'>$') + cb = fig.colorbar(im, orientation='vertical', aspect=35) + cb1 = fig.colorbar(im1, orientation='vertical', aspect=35) + cb1.ax.yaxis.set_ticks_position('left') + pos = ax.get_position() + pos = [ pos.x0+.6, pos.y0, pos.width, pos.height ] + fig.axes[-2].set_position(pos) + pos = ax.get_position() + pos = [ pos.x0+.575, pos.y0, pos.width, pos.height ] + fig.axes[-1].set_position(pos) + cb.set_label('Assignment probability (%)', rotation=-90, labelpad=15) + ax.set_title(f'{timestamp}\nMultiplexed assignment probability matrix\n{" ".join(Qubits)}') + +def cross_fidelity_matrix_plotfn( + C, + Qubits, + timestamp, + ax=None, **kw): + f = ax.get_figure() + alpha_reds = cmap_to_alpha(cmap=pl.cm.Reds) + colors = [(0.58, 0.404, 0.741), (0, 0, 0)] + cm = LinearSegmentedColormap.from_list('my_purple', colors) + alpha_blues = cmap_first_to_alpha(cmap=cm) + # red_im = ax.matshow(C*100, + # cmap=alpha_reds, 
clim=(-10., 10)) + red_im = ax.matshow(C*100, + cmap='RdBu', clim=(-10., 10)) + blue_im = ax.matshow(C*100, + cmap=alpha_blues, clim=(90, 100)) + caxb = f.add_axes([0.91, 0.58, 0.02, 0.3]) + caxr = f.add_axes([0.91, 0.11, 0.02, 0.3]) + ax.figure.colorbar(red_im, ax=ax, cax=caxr) + ax.figure.colorbar(blue_im, ax=ax, cax=caxb) + rows, cols = np.shape(C) + for i in range(rows): + for j in range(cols): + c = C[i, j] + if c > .05 or c <-0.05: + col = 'white' + else: + col = 'black' + ax.text(j, i, '{:.1f}'.format(c*100), + va='center', ha='center', color=col) + ax.set_xticklabels(Qubits) + ax.set_xticks(np.arange(len(Qubits))) + ax.set_yticklabels(Qubits) + ax.set_yticks(np.arange(len(Qubits))) + ax.set_ylim(len(Qubits)-.5, -.5) + # matrix[i,j] => i = column, j = row + ax.set_ylabel(r'Prepared qubit, $q_i$') + ax.set_xlabel(r'Classified qubit $q_j$') + ax.tick_params(axis="x", bottom=True, labelbottom=True, top=False, labeltop=False) + qubit_labels_str = ', '.join(Qubits) + txtstr = f'{timestamp}\nCross fidelity matrix '+qubit_labels_str + + ax.set_title(txtstr) \ No newline at end of file diff --git a/pycqed/analysis_v2/repeated_stabilizer_analysis.py b/pycqed/analysis_v2/repeated_stabilizer_analysis.py new file mode 100644 index 0000000000..7d8fdff0be --- /dev/null +++ b/pycqed/analysis_v2/repeated_stabilizer_analysis.py @@ -0,0 +1,312 @@ +# ------------------------------------------- +# Module containing base-analysis implementation for repeated stabilizer analysis. 
+# ------------------------------------------- +from abc import ABC, ABCMeta, abstractmethod +import os +from typing import List, Union, Dict, Callable, Any, Optional +from enum import Enum +import pycqed.measurement.hdf5_data as hd5 +from pycqed.analysis_v2.base_analysis import BaseDataAnalysis +from pycqed.analysis.analysis_toolbox import get_datafilepath_from_timestamp +from qce_circuit.language.intrf_declarative_circuit import ( + InitialStateContainer, + InitialStateEnum, +) +from qce_circuit.connectivity.intrf_channel_identifier import ( + IQubitID, + QubitIDObj, +) +from qce_circuit.library.repetition_code.circuit_components import ( + IRepetitionCodeDescription, + RepetitionCodeDescription, +) +from qce_circuit.library.repetition_code.repetition_code_connectivity import Repetition9Code +from qce_circuit.visualization.visualize_layout.display_connectivity import plot_gate_sequences +from qce_interp import ( + DataManager, + QubitIDObj, + ParityType, + Surface17Layer, + IErrorDetectionIdentifier, + ILabeledErrorDetectionIdentifier, + ErrorDetectionIdentifier, + LabeledErrorDetectionIdentifier, + ISyndromeDecoder, + ILabeledSyndromeDecoder, + Distance5LookupTableDecoder, + LabeledSyndromeDecoder, + StateAcquisitionContainer, + MWPMDecoder, +) +from qce_interp.decoder_examples.majority_voting import MajorityVotingDecoder +from qce_interp.interface_definitions.intrf_syndrome_decoder import IDecoder +from qce_interp.interface_definitions.intrf_error_identifier import ( + DataArrayLabels, +) +from qce_interp.visualization import ( + plot_state_classification, + plot_defect_rate, + plot_all_defect_rate, + plot_pij_matrix, + plot_compare_fidelity, +) +from qce_interp.visualization.plotting_functionality import ( + SubplotKeywordEnum, + IFigureAxesPair, +) +import matplotlib.pyplot as plt +import itertools +import numpy as np +from qce_interp.visualization.plotting_functionality import ( + construct_subplot, + IFigureAxesPair, + LabelFormat, + AxesFormat, + 
SubplotKeywordEnum, +) + + +class RepeatedStabilizerAnalysis(BaseDataAnalysis): + + # region Class Constructor + def __init__(self, involved_qubit_names: List[str], qec_cycles: List[int], initial_state: InitialStateContainer, t_start: str = None, t_stop: str = None, label: str = '', data_file_path: str = None, close_figs: bool = True, options_dict: dict = None, extract_only: bool = False, do_fitting: bool = False, save_qois: bool = True): + super().__init__(t_start, t_stop, label, data_file_path, close_figs, options_dict, extract_only, do_fitting, save_qois) + # Store arguments + self.involved_qubit_ids: List[IQubitID] = [QubitIDObj(name) for name in involved_qubit_names] + self.involved_data_qubit_ids: List[IQubitID] = [qubit_id for qubit_id in self.involved_qubit_ids if qubit_id in Surface17Layer().data_qubit_ids] + self.involved_ancilla_qubit_ids: List[IQubitID] = [qubit_id for qubit_id in self.involved_qubit_ids if qubit_id in Surface17Layer().ancilla_qubit_ids] + self.qec_cycles: List[int] = qec_cycles + self.initial_state: InitialStateContainer = initial_state + self.circuit_description: IRepetitionCodeDescription = RepetitionCodeDescription.from_connectivity( + involved_qubit_ids=self.involved_qubit_ids, + connectivity=Repetition9Code(), + ) + # Required attributes + self.params_dict: Dict = {} + self.numeric_params: Dict = {} + # Obtain data file path + self.get_timestamps() + self.timestamp = self.timestamps[0] + self.data_file_path = get_datafilepath_from_timestamp(self.timestamp) + # Specify data keys + self._raw_data_key: str = 'data' + self._raw_value_names_key: str = 'value_names' + # endregion + + def extract_data(self): + """ + This is a new style (sept 2019) data extraction. + This could at some point move to a higher level class. 
+ """ + param_spec = { + self._raw_data_key: ('Experimental Data/Data', 'dset'), + self._raw_value_names_key: ('Experimental Data', 'attr:value_names'), + } + self.raw_data_dict = hd5.extract_pars_from_datafile(self.data_file_path, param_spec) + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(self.data_file_path)[0] + + def process_data(self): + """ + process_data: overloaded in child classes, + takes care of mundane tasks such as binning filtering etc + """ + self.data_manager: DataManager = DataManager.from_file_path( + file_path=self.data_file_path, + qec_rounds=self.qec_cycles, + heralded_initialization=True, + qutrit_calibration_points=True, + involved_data_qubit_ids=self.involved_data_qubit_ids, + involved_ancilla_qubit_ids=self.involved_ancilla_qubit_ids, + expected_parity_lookup=self.initial_state_to_expected_parity( + initial_state=self.initial_state, + involved_ancilla_qubit_ids=self.involved_ancilla_qubit_ids, + ), + device_layout=Surface17Layer(), + ) + error_identifier: IErrorDetectionIdentifier = self.data_manager.get_error_detection_classifier( + use_heralded_post_selection=True, + use_computational_parity=True, + ) + self.labeled_error_identifier: ILabeledErrorDetectionIdentifier = LabeledErrorDetectionIdentifier( + error_identifier, + ) + self.decoder_majority: IDecoder = MajorityVotingDecoder( + error_identifier=error_identifier, + ) + self.decoder_mwpm: IDecoder = MWPMDecoder( + error_identifier=error_identifier, + circuit_description=self.circuit_description, + initial_state_container=self.initial_state, + ) + + def prepare_plots(self): + """ + Defines a default plot by setting up the plotting dictionaries to + specify what is to be plotted + """ + # Data allocation + self.axs_dict = {} + self.plot_dicts = {} + timestamp: str = self.timestamp + + # Pij matrix + title: str = 'pij_matrix' + fig, ax = plt.subplots() + self.axs_dict[title] = ax + self.figs[title] = fig + self.plot_dicts[title] = { + 
'plotfn': plot_function_wrapper(plot_pij_matrix), + 'error_identifier': self.labeled_error_identifier, + 'included_rounds': self.data_manager.qec_rounds, + 'timestamp': timestamp, + } + # Logical fidelity + title: str = 'logical_fidelity' + fig, ax = plt.subplots() + self.axs_dict[title] = ax + self.figs[title] = fig + self.plot_dicts[title] = { + 'plotfn': plot_function_wrapper(plot_compare_fidelity), + 'decoders': [self.decoder_mwpm, self.decoder_majority], + 'included_rounds': self.data_manager.qec_rounds, + 'target_state': self.initial_state, + 'timestamp': timestamp, + } + # Gate sequence + title: str = 'circuit_layout' + sequence = self.circuit_description.to_sequence() + sequence_count: int = sequence.gate_sequence_count + fig, axs = plt.subplots(figsize=(5 * sequence_count, 5), ncols=sequence_count) + self.axs_dict[title] = axs[0] + self.figs[title] = fig + self.plot_dicts[title] = { + 'plotfn': plot_function_wrapper(plot_gate_sequences), + 'description': sequence, + 'timestamp': timestamp, + } + + # Defect rates (individual) + for qubit_id in self.involved_ancilla_qubit_ids: + title: str = f'defect_rate_{qubit_id.id}' + fig, ax = plt.subplots() + self.axs_dict[title] = ax + self.figs[title] = fig + self.plot_dicts[title] = { + 'plotfn': plot_function_wrapper(plot_defect_rate), + 'error_identifier': self.labeled_error_identifier, + 'qubit_id': qubit_id, + 'qec_cycles': self.data_manager.qec_rounds[-1], + 'timestamp': timestamp, + } + # Defect rates (all) + title: str = 'all_defect_rates' + fig, ax = plt.subplots() + self.axs_dict[title] = ax + self.figs[title] = fig + self.plot_dicts[title] = { + 'plotfn': plot_function_wrapper(plot_all_defect_rate), + 'error_identifier': self.labeled_error_identifier, + 'included_rounds': self.data_manager.qec_rounds[-1], + 'timestamp': timestamp, + } + # IQ readout (individual) + for qubit_id in self.involved_qubit_ids: + title: str = f'IQ_readout_histogram_{qubit_id.id}' + fig, ax = plt.subplots() + self.axs_dict[title] 
= ax + self.figs[title] = fig + self.plot_dicts[title] = { + 'plotfn': plot_function_wrapper(plot_state_classification), + 'state_classifier': self.data_manager.get_state_acquisition(qubit_id=qubit_id), + 'timestamp': timestamp, + } + + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', True): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + tag_tstamp=self.options_dict.get('tag_tstamp', True)) + + def analyze_fit_results(self): + """ + Do analysis on the results of the fits to extract quantities of + interest. + """ + # raise NotImplemented + pass + + @staticmethod + def initial_state_to_expected_parity(initial_state: InitialStateContainer, involved_ancilla_qubit_ids: List[IQubitID]) -> Dict[IQubitID, ParityType]: + assert initial_state.distance == len(involved_ancilla_qubit_ids) + 1, f"Expects N number of initial states and N-1 number of ancilla's. instead {initial_state.distance} != {len(involved_ancilla_qubit_ids)-1}." 
+ result: Dict[IQubitID, ParityType] = {} + + for i, qubit_id in enumerate(involved_ancilla_qubit_ids): + state_a: int = initial_state.as_array[i] + state_b: int = initial_state.as_array[i+1] + even_parity: bool = state_a == state_b + if even_parity: + result[qubit_id] = ParityType.EVEN + else: + result[qubit_id] = ParityType.ODD + return result + + +def plot_function_wrapper(plot_function: Callable[[Any], Any]) -> Callable[[Optional[plt.Axes], Any], Any]: + + def method(ax: Optional[plt.Axes] = None, *args, **kwargs) -> IFigureAxesPair: + # Data allocation + timestamp: str = kwargs.pop("timestamp", "not defined") + fig = ax.get_figure() + axs = fig.get_axes() + fig.suptitle(f'ts: {timestamp}\n') + + kwargs[SubplotKeywordEnum.HOST_AXES.value] = (fig, axs) + if len(axs) == 1: + kwargs[SubplotKeywordEnum.HOST_AXES.value] = (fig, axs[0]) + return plot_function( + *args, + **kwargs, + ) + return method + + +if __name__ == "__main__": + from importlib import reload + from typing import List, Dict, Any + from pycqed.analysis import measurement_analysis as ma + from pycqed.analysis_v2 import repeated_stabilizer_analysis as repsa + reload(repsa) + from pycqed.analysis_v2.repeated_stabilizer_analysis import ( + RepeatedStabilizerAnalysis, + InitialStateContainer, + InitialStateEnum, + ) + import itertools as itt + + datadir = r'C:\Experiments\202208_Uran\Data' + ma.a_tools.datadir = datadir + + involved_ancilla_ids=['X3', 'X4'] + involved_data_ids=['D7', 'D8', 'D9'] + fillvalue = None + involved_qubit_names: List[str] = [ + item + for pair in itt.zip_longest(involved_data_ids, involved_ancilla_ids, fillvalue=fillvalue) for item in pair if + item != fillvalue + ] + + analysis = RepeatedStabilizerAnalysis( + involved_qubit_names=involved_qubit_names, + qec_cycles=[i for i in range(0, 10, 1)], + initial_state=InitialStateContainer.from_ordered_list([ + InitialStateEnum.ZERO, + InitialStateEnum.ZERO, + InitialStateEnum.ZERO, + ]), + label="Repeated_stab_meas", + ) + 
print(analysis.get_timestamps()) + analysis.run_analysis() \ No newline at end of file diff --git a/pycqed/analysis_v2/timedomain_analysis.py b/pycqed/analysis_v2/timedomain_analysis.py index 514e67ccab..cbaa149a46 100644 --- a/pycqed/analysis_v2/timedomain_analysis.py +++ b/pycqed/analysis_v2/timedomain_analysis.py @@ -1,9 +1,12 @@ +import os from importlib import reload import lmfit import numpy as np from uncertainties import ufloat from scipy.stats import sem from collections import OrderedDict +import matplotlib.pyplot as plt +import pycqed.measurement.hdf5_data as hd5 from pycqed.analysis import fitting_models as fit_mods reload(fit_mods) from pycqed.analysis import analysis_toolbox as a_tools @@ -411,8 +414,10 @@ def prepare_fitting(self): # This enforces the oscillation to start at the equator # and ensures that any over/under rotation is absorbed in the # frequency - guess_pars["amplitude"].value = 0.5 + guess_pars["amplitude"].value = 0.45 guess_pars["amplitude"].vary = True + guess_pars["amplitude"].min = 0.4 + guess_pars["amplitude"].max = 0.5 guess_pars["offset"].value = 0.5 guess_pars["offset"].vary = True @@ -1955,7 +1960,7 @@ def _prepare_main_oscillation_figure(self): "xpos": 1.45, "plotfn": self.plot_text, "box_props": "fancy", - "line_kws": {"alpha": 0}, + "line_kws": {"alpha": 1}, "horizontalalignment": "right", "text_string": phase_message, } @@ -2297,3 +2302,128 @@ def prepare_plots(self): def get_intersect(self): return self.proc_data_dict["root"] + + +class FineBiasAnalysis(ba.BaseDataAnalysis): + """ + Behaviour class, + """ + + # region Class Constructor + def __init__( + self, + initial_bias: float, + t_start: str = None, + t_stop: str = None, + data_file_path: str = None, + label: str = "", + options_dict: dict = None, + ): + super().__init__( + t_start=t_start, + t_stop=t_stop, + label=label, + data_file_path=data_file_path, + options_dict=options_dict, + close_figs=True, + extract_only=False, + do_fitting=False, + ) + 
self.initial_bias: float = initial_bias # A + # endregion + + + # region Interface Methods + def extract_data(self): + """ + This is a new style (sept 2019) data extraction. + This could at some point move to a higher level class. + """ + self.get_timestamps() + self.timestamp = self.timestamps[0] + + data_fp = a_tools.get_datafilepath_from_timestamp(self.timestamp) + param_spec = {'data': ('Experimental Data/Data', 'dset'), + 'value_names': ('Experimental Data', 'attr:value_names')} + self.raw_data_dict = hd5.extract_pars_from_datafile(data_fp, param_spec) + # Parts added to be compatible with base analysis data requirements + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] + + def process_data(self): + self.qubit_id = self.raw_data_dict['folder'].split('_')[-1] + self.flux_array: np.ndarray = self.raw_data_dict['data'][:, 0] + missing_fraction_array: np.ndarray = self.raw_data_dict['data'][:, 1] + self.conditional_phase_array: np.ndarray = self.raw_data_dict['data'][:, 2] # Degree + single_qubit_phase_array: np.ndarray = self.raw_data_dict['data'][:, 3] # Degree + + self.leakage_array: np.ndarray = missing_fraction_array / 2 + self.conditional_phase_array -= 180 + self.single_qubit_phase_array = np.mod(single_qubit_phase_array + 180, 360) - 180 + + self.polynomial_fit = np.polyfit(self.flux_array, self.leakage_array, deg=2) + self.polynomials = np.poly1d(self.polynomial_fit) + # Extract the DC bias for minimum L1 according to fit. 
+ self.fitted_min_bias = -self.polynomial_fit[1] / (2 * self.polynomial_fit[0]) + self.minimal_leakage_bias = self.flux_array[self.leakage_array.argmin()] + + def prepare_plots(self): + self.axs_dict = {} + fig, ax = plt.subplots(figsize=(6, 5), dpi=256) + self.axs_dict[f'leakage_arc_trace'] = ax + self.figs[f'leakage_arc_trace'] = fig + self.plot_dicts['leakage_arc_trace'] = { + 'plotfn': self.plot_leakage_arc, + 'ax_id': 'leakage_arc_trace', + 'flux_array': self.flux_array, + 'conditional_phase_array': self.conditional_phase_array, + 'single_qubit_phase_array': self.single_qubit_phase_array, + 'leakage_array': self.leakage_array, + 'initial_dc': self.initial_bias, + 'fitted_min_bias': self.fitted_min_bias, + 'minimal_leakage_bias': self.minimal_leakage_bias, + 'polynomial': self.polynomials, + 'timestamp': self.timestamps[0], + 'qubit_name': self.qubit_id, + } + + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', False): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + tag_tstamp=self.options_dict.get('tag_tstamp', True)) + # endregion + + # region Static Class Methods + @staticmethod + def plot_leakage_arc(flux_array: np.ndarray, conditional_phase_array: np.ndarray, + single_qubit_phase_array: np.ndarray, leakage_array: np.ndarray, initial_dc: float, fitted_min_bias: float, minimal_leakage_bias: float, polynomial, + timestamp: str, qubit_name: str, ax, **kw): + fig = ax.get_figure() + axt = ax.twinx() + + axt.plot(flux_array, conditional_phase_array, 'C2o', label='$\\delta\\phi_{2Q}$') + axt.plot(flux_array, single_qubit_phase_array, 'C4o', label='$\\delta\\phi_{1Q}$') # Tim 15-06 + axt.set_ylabel('Phase error (deg)') + axt.set_ylim(top = 180, bottom = -180) # HARDCODED range with hardcoded ticks. 
Tim 15-06 + axt.set_yticks(np.arange(-180, 180+1, 20)) + axt.legend(loc='upper left', bbox_to_anchor=(1.1, 1.05)) + + ax.plot(flux_array, leakage_array, 'C3o', label="$\mathrm{L_1}$") + high_resolution_flux_array = np.linspace(min(flux_array), max(flux_array), 101) + high_resolution_polynomial_array = polynomial(high_resolution_flux_array) + ax.plot(high_resolution_flux_array, high_resolution_polynomial_array, 'C0--') + ax.axvline(initial_dc, color='r', alpha=.25, label=fr'Starting bias = {initial_dc * 1e6:.2f} $\mathrm{{\mu A}}$') + ax.axvline(fitted_min_bias, color='b', alpha=.25, label=fr'Suggested bias = {fitted_min_bias * 1e6:.2f} $\mathrm{{\mu A}}$') + ax.axvline(minimal_leakage_bias, color='magenta', alpha=.25, label=fr'Min Leakage bias = {minimal_leakage_bias * 1e6:.2f} $\mathrm{{\mu A}}$') + + ax.set_xlabel('DC flux bias ($\mathrm{\mu A}$)') + ax.set_ylabel('Leakage estimate $\mathrm{L_1}$') + ax.set_ylim(bottom=0) + ax.legend(loc='upper left', bbox_to_anchor=(1.1, 0.9)) + + ax.set_title(f'{timestamp}\n{qubit_name} Leakage vs DC-flux') + return fig, ax + # endregion \ No newline at end of file diff --git a/pycqed/analysis_v2/timing_cal_analysis.py b/pycqed/analysis_v2/timing_cal_analysis.py index b198242b33..d83b9167ff 100644 --- a/pycqed/analysis_v2/timing_cal_analysis.py +++ b/pycqed/analysis_v2/timing_cal_analysis.py @@ -1,13 +1,29 @@ +import os import lmfit +import pycqed.measurement.hdf5_data as hd5 import numpy as np +from typing import Dict, Callable, Any, Optional +import matplotlib.pyplot as plt from collections import OrderedDict from pycqed.analysis import fitting_models as fit_mods from pycqed.analysis import analysis_toolbox as a_tools import pycqed.analysis_v2.base_analysis as ba - from pycqed.analysis import analysis_toolbox as a_tools from collections import OrderedDict from pycqed.analysis import measurement_analysis as ma_old +from qce_utils.control_interfaces.intrf_channel_identifier import IQubitID, QubitIDObj +from 
qce_utils.addon_pycqed.deserialize_xarray_to_obj import DeserializeBootstrap +from qce_utils.addon_pycqed.object_factories.factory_latency_landscape import LatencyAmplitudeIdentifierFactory +from qce_utils.control_interfaces.datastorage_control.analysis_factories.factory_latency_transmission import ( + LatencyAmplitudeIdentifierAnalysis, + LatencyAmplitudeIdentifier, + LatencyExperimentType, +) +from qce_interp.visualization.plotting_functionality import ( + IFigureAxesPair, + SubplotKeywordEnum, + LabelFormat, +) class Timing_Cal_Flux_Coarse(ba.BaseDataAnalysis): @@ -335,3 +351,140 @@ def annotate_timing_fine_cal(ax, flux_latency, ro_latency, ax.text(1.25, .85, timing_info, transform=ax.transAxes) ax.legend() + + +class TimingMicrowaveFluxAnalysis(ba.BaseDataAnalysis): + + # region Class Constructor + def __init__(self, qubit_id: str, flux_pulse_duration: float, microwave_pulse_duration: float, microwave_pulse_separation: float, t_start: str = None, t_stop: str = None, label: str = '', data_file_path: str = None, close_figs: bool = True, options_dict: dict = None, extract_only: bool = False, do_fitting: bool = False, save_qois: bool = True): + super().__init__(t_start, t_stop, label, data_file_path, close_figs, options_dict, extract_only, do_fitting, save_qois) + # Data allocation + self._qubit_id: IQubitID = QubitIDObj(qubit_id) + self.object_factory = LatencyAmplitudeIdentifierFactory() + self.analysis_factory = LatencyAmplitudeIdentifierAnalysis( + qubit_id=self._qubit_id, + flux_pulse_duration=flux_pulse_duration, + microwave_pulse_duration=microwave_pulse_duration, + buffer_duration=microwave_pulse_separation - flux_pulse_duration, + experiment_type=LatencyExperimentType.DELAY_MICROWAVE_FIX_FLUX, # FIXME: Currently hardcoded option + ) + # Required attributes + self.params_dict: Dict = {} + self.numeric_params: Dict = {} + # Obtain data file path + self.get_timestamps() + self.timestamp = self.timestamps[0] + self.data_file_path = 
a_tools.get_datafilepath_from_timestamp(self.timestamp) + # Specify data keys + self._raw_data_key: str = 'data' + self._raw_value_names_key: str = 'value_names' + # endregion + + # region Class Methods + def extract_data(self): + """ + This is a new style (sept 2019) data extraction. + This could at some point move to a higher level class. + """ + param_spec = { + self._raw_data_key: ('Experimental Data/Data', 'dset'), + self._raw_value_names_key: ('Experimental Data', 'attr:value_names'), + } + self.raw_data_dict = hd5.extract_pars_from_datafile(self.data_file_path, param_spec) + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(self.data_file_path)[0] + # Construct datastructure + self.latency_identifier: LatencyAmplitudeIdentifier = self.object_factory.construct( + source=DeserializeBootstrap.from_path(self.data_file_path) + ) + + def process_data(self): + """ + process_data: overloaded in child classes, + takes care of mundane tasks such as binning filtering etc + """ + pass + + def prepare_plots(self): + """ + Defines a default plot by setting up the plotting dictionaries to + specify what is to be plotted + """ + # Data allocation + self.axs_dict = {} + self.plot_dicts = {} + timestamp: str = self.timestamp + + # (MW) Latency vs (MW) pulse amplitude + title: str = 'latency_vs_amplitude' + fig, ax = plt.subplots() + self.axs_dict[title] = ax + self.figs[title] = fig + self.plot_dicts[title] = { + 'plotfn': plot_function_wrapper(self.plot_latency_vs_amplitude_intersection), + 'qubit_id': self._qubit_id, + 'latency_identifier': self.latency_identifier, + 'analysis_factory': self.analysis_factory, + 'timestamp': timestamp, + } + + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', True): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + 
tag_tstamp=self.options_dict.get('tag_tstamp', True)) + + def analyze_fit_results(self): + """ + Do analysis on the results of the fits to extract quantities of + interest. + """ + # raise NotImplemented + pass + + @staticmethod + def plot_latency_vs_amplitude_intersection(qubit_id: IQubitID, latency_identifier: LatencyAmplitudeIdentifier, analysis_factory: LatencyAmplitudeIdentifierAnalysis, **kwargs) -> IFigureAxesPair: + """Plot wrapper.""" + kwargs[SubplotKeywordEnum.LABEL_FORMAT.value] = LabelFormat( + x_label=f'{qubit_id.id} MW latency [ns]', + y_label=f'{qubit_id.id} Integrated value [a.u.]', + ) + fig, ax = analysis_factory.plot_latency_vs_amplitude_intersection( + identifier=latency_identifier, + qubit_id=qubit_id, + **kwargs, + ) + + if analysis_factory.analyzable_latency: + transition_latencies: np.ndarray = analysis_factory.calculate_relative_transition_latencies(identifier=latency_identifier) + relative_latency: float = analysis_factory.calculate_relative_latency(identifier=latency_identifier) + + kwargs[SubplotKeywordEnum.HOST_AXES.value] = fig, ax + fig, ax = analysis_factory.plot_latency_transition_detection( + transition_latencies=transition_latencies, + relative_latency=relative_latency, + **kwargs, + ) + return fig, ax + # endregion + + +def plot_function_wrapper(plot_function: Callable[[Any], Any]) -> Callable[[Optional[plt.Axes], Any], Any]: + def method(ax: Optional[plt.Axes] = None, *args, **kwargs) -> IFigureAxesPair: + # Data allocation + timestamp: str = kwargs.pop("timestamp", "not defined") + fig = ax.get_figure() + axs = fig.get_axes() + fig.suptitle(f'ts: {timestamp}\n') + + kwargs[SubplotKeywordEnum.HOST_AXES.value] = (fig, axs) + if len(axs) == 1: + kwargs[SubplotKeywordEnum.HOST_AXES.value] = (fig, axs[0]) + return plot_function( + *args, + **kwargs, + ) + + return method diff --git a/pycqed/analysis_v2/tomography_analysis.py b/pycqed/analysis_v2/tomography_analysis.py new file mode 100644 index 0000000000..566358d4d7 --- /dev/null 
+++ b/pycqed/analysis_v2/tomography_analysis.py @@ -0,0 +1,1022 @@ +""" +File containing analysis for tomography-related experiments. +""" +import os +import pycqed.analysis_v2.base_analysis as ba +import matplotlib.pyplot as plt +import numpy as np +from pycqed.analysis.analysis_toolbox import get_datafilepath_from_timestamp +import pycqed.measurement.hdf5_data as h5d +import matplotlib.patches as patches +from mpl_toolkits.mplot3d import axes3d +import matplotlib.colors +import itertools + + +def rotate_and_center_data(I, Q, vec0, vec1, phi=0): + vector = vec1-vec0 + angle = np.arctan(vector[1]/vector[0]) + rot_matrix = np.array([[ np.cos(-angle+phi),-np.sin(-angle+phi)], + [ np.sin(-angle+phi), np.cos(-angle+phi)]]) + proc = np.array((I, Q)) + proc = np.dot(rot_matrix, proc) + return proc.transpose() + +def _calculate_fid_and_threshold(x0, n0, x1, n1): + """ + Calculate fidelity and threshold from histogram data: + x0, n0 is the histogram data of shots 0 (value and occurences), + x1, n1 is the histogram data of shots 1 (value and occurences). + """ + # Build cumulative histograms of shots 0 + # and 1 in common bins by interpolation. + all_x = np.unique(np.sort(np.concatenate((x0, x1)))) + cumsum0, cumsum1 = np.cumsum(n0), np.cumsum(n1) + ecumsum0 = np.interp(x=all_x, xp=x0, fp=cumsum0, left=0) + necumsum0 = ecumsum0/np.max(ecumsum0) + ecumsum1 = np.interp(x=all_x, xp=x1, fp=cumsum1, left=0) + necumsum1 = ecumsum1/np.max(ecumsum1) + # Calculate optimal threshold and fidelity + F_vs_th = (1-(1-abs(necumsum0 - necumsum1))/2) + opt_idxs = np.argwhere(F_vs_th == np.amax(F_vs_th)) + opt_idx = int(round(np.average(opt_idxs))) + F_assignment_raw = F_vs_th[opt_idx] + threshold_raw = all_x[opt_idx] + return F_assignment_raw, threshold_raw + +def _fit_double_gauss(x_vals, hist_0, hist_1, + _x0_guess=None, _x1_guess=None): + ''' + Fit two histograms to a double gaussian with + common parameters. From fitted parameters, + calculate SNR, Pe0, Pg1, Teff, Ffit and Fdiscr. 
+ ''' + from scipy.optimize import curve_fit + # Double gaussian model for fitting + def _gauss_pdf(x, x0, sigma): + return np.exp(-((x-x0)/sigma)**2/2) + global double_gauss + def double_gauss(x, x0, x1, sigma0, sigma1, A, r): + _dist0 = A*( (1-r)*_gauss_pdf(x, x0, sigma0) + r*_gauss_pdf(x, x1, sigma1) ) + return _dist0 + # helper function to simultaneously fit both histograms with common parameters + def _double_gauss_joint(x, x0, x1, sigma0, sigma1, A0, A1, r0, r1): + _dist0 = double_gauss(x, x0, x1, sigma0, sigma1, A0, r0) + _dist1 = double_gauss(x, x1, x0, sigma1, sigma0, A1, r1) + return np.concatenate((_dist0, _dist1)) + # Guess for fit + pdf_0 = hist_0/np.sum(hist_0) # Get prob. distribution + pdf_1 = hist_1/np.sum(hist_1) # + if _x0_guess == None: + _x0_guess = np.sum(x_vals*pdf_0) # calculate mean + if _x1_guess == None: + _x1_guess = np.sum(x_vals*pdf_1) # + _sigma0_guess = np.sqrt(np.sum((x_vals-_x0_guess)**2*pdf_0)) # calculate std + _sigma1_guess = np.sqrt(np.sum((x_vals-_x1_guess)**2*pdf_1)) # + _r0_guess = 0.01 + _r1_guess = 0.05 + _A0_guess = np.max(hist_0) + _A1_guess = np.max(hist_1) + p0 = [_x0_guess, _x1_guess, _sigma0_guess, _sigma1_guess, _A0_guess, _A1_guess, _r0_guess, _r1_guess] + # Bounding parameters + _x0_bound = (-np.inf,np.inf) + _x1_bound = (-np.inf,np.inf) + _sigma0_bound = (0,np.inf) + _sigma1_bound = (0,np.inf) + _r0_bound = (0,1) + _r1_bound = (0,1) + _A0_bound = (0,np.inf) + _A1_bound = (0,np.inf) + bounds = np.array([_x0_bound, _x1_bound, _sigma0_bound, _sigma1_bound, _A0_bound, _A1_bound, _r0_bound, _r1_bound]) + # Fit parameters within bounds + popt, pcov = curve_fit( + _double_gauss_joint, x_vals, + np.concatenate((hist_0, hist_1)), + p0=p0, bounds=bounds.transpose()) + popt0 = popt[[0,1,2,3,4,6]] + popt1 = popt[[1,0,3,2,5,7]] + # Calculate quantities of interest + SNR = abs(popt0[0] - popt1[0])/((abs(popt0[2])+abs(popt1[2]))/2) + P_e0 = popt0[5]*popt0[2]/(popt0[2]*popt0[5] + popt0[3]*(1-popt0[5])) + P_g1 = 
popt1[5]*popt1[2]/(popt1[2]*popt1[5] + popt1[3]*(1-popt1[5])) + # Fidelity from fit + _range = x_vals[0], x_vals[-1] + _x_data = np.linspace(*_range, 10001) + _h0 = double_gauss(_x_data, *popt0)# compute distribution from + _h1 = double_gauss(_x_data, *popt1)# fitted parameters. + Fid_fit, threshold_fit = _calculate_fid_and_threshold(_x_data, _h0, _x_data, _h1) + # Discrimination fidelity + _h0 = double_gauss(_x_data, *popt0[:-1], 0)# compute distribution without residual + _h1 = double_gauss(_x_data, *popt1[:-1], 0)# excitation of relaxation. + Fid_discr, threshold_discr = _calculate_fid_and_threshold(_x_data, _h0, _x_data, _h1) + # return results + qoi = { 'SNR': SNR, + 'P_e0': P_e0, 'P_g1': P_g1, + 'Fid_fit': Fid_fit, 'Fid_discr': Fid_discr } + return popt0, popt1, qoi + +def _decision_boundary_points(coefs, intercepts): + ''' + Find points along the decision boundaries of + LinearDiscriminantAnalysis (LDA). + This is performed by finding the intersection + of the bounds of LDA. For LDA, these bounds are + encoded in the coef_ and intercept_ parameters + of the classifier. + Each bound is given by the equation: + y + coef_i[0]/coef_i[1]*x + intercept_i = 0 + Note this only works for LinearDiscriminantAnalysis. + Other classifiers might have different bound models. + ''' + points = {} + # Cycle through model coefficients + # and intercepts.
+ for i, j in [[0,1], [1,2], [0,2]]: + c_i = coefs[i] + int_i = intercepts[i] + c_j = coefs[j] + int_j = intercepts[j] + x = (- int_j/c_j[1] + int_i/c_i[1])/(-c_i[0]/c_i[1] + c_j[0]/c_j[1]) + y = -c_i[0]/c_i[1]*x - int_i/c_i[1] + points[f'{i}{j}'] = (x, y) + # Find mean point + points['mean'] = np.mean([ [x, y] for (x, y) in points.values()], axis=0) + return points + +def _get_expected_value(operator, state, n): + m = 1 + for i in range(n): + if operator[i] == 'Z' and state[i] == '1': + m *= -1 + return m + +def _PTM_fidelity(R, R_id): + return (np.trace(np.matmul(np.transpose(R_id), R))/2+1)/3 + +def _gen_M_matrix(n): + # List of different Operators + ops = ['I','Z'] + Operators = [''.join(op) for op in itertools.product(ops, repeat=n)] + # List of calibration points + states = ['0','1'] + Cal_points = [''.join(s) for s in itertools.product(states, repeat=n)] + # Calculate M matrix + M = np.zeros((2**n, 2**n), dtype=int) + for j, state in enumerate(Cal_points): + Betas = np.ones(len(Operators)) + for i in range(2**n): + Betas[i] = _get_expected_value(Operators[i], state, n) + M[j] = Betas + M = np.linalg.pinv(M) # invert matrix + return M + +def _get_Beta_matrix(Cal_shots_dig, n): + ''' + Calculate RO error model (Beta) matrix. + should be a dictionary with the format: + Cal_shots_dig[][] = array of shots with +1 and -1. 
+ ''' + # List of different Operators + ops = ['I','Z'] + Operators = [''.join(op) for op in itertools.product(ops, repeat=n)] + # List of qubits + Qubits = list(Cal_shots_dig.keys()) + # Calculate Beta matrix + H = {} + B = {} + M = _gen_M_matrix(n) + for op in Operators[1:]: + H[op] = np.zeros(2**n) + for i, state in enumerate(Cal_shots_dig[Qubits[0]].keys()): + correlator = 1 + for j, qubit in enumerate(Qubits): + if op[j] == 'Z': + correlator *= np.array(Cal_shots_dig[Qubits[j]][state]) + H[op][i] = np.mean(correlator) + B[op] = np.dot(M, H[op]) + return B + +def _correct_pauli_vector(Beta_matrix, P_vector): + ''' + Applies readout correction from Beta matrix + to a single qubit pauli vector. + ''' + B_matrix = np.array([Beta_matrix[key][1:] for key in Beta_matrix.keys()]) + B_0 = np.array([Beta_matrix[key][0] for key in Beta_matrix.keys()]) + iB_matrix = np.linalg.inv(B_matrix) + # This part is ony valid for single qubit pauli vectors + _p_vec_corrected = (P_vector[1:]-B_0)*iB_matrix + P_corrected = np.concatenate(([1], _p_vec_corrected[0])) + return P_corrected + +def _gen_density_matrix(mx, my, mz): + Pauli_ops = {} + Pauli_ops['I'] = np.array([[ 1, 0], + [ 0, 1]]) + Pauli_ops['Z'] = np.array([[ 1, 0], + [ 0, -1]]) + Pauli_ops['X'] = np.array([[ 0, 1], + [ 1, 0]]) + Pauli_ops['Y'] = np.array([[ 0,-1j], + [ 1j, 0]]) + rho = (Pauli_ops['I'] + mx*Pauli_ops['X'] + my*Pauli_ops['Y'] + mz*Pauli_ops['Z'])/2 + return rho + +def _get_choi_from_PTM(PTM, dim=2): + Pauli_ops = {} + Pauli_ops['I'] = np.array([[ 1, 0], + [ 0, 1]]) + Pauli_ops['Z'] = np.array([[ 1, 0], + [ 0, -1]]) + Pauli_ops['X'] = np.array([[ 0, 1], + [ 1, 0]]) + Pauli_ops['Y'] = np.array([[ 0,-1j], + [ 1j, 0]]) + paulis = [Pauli_ops['I'], + Pauli_ops['X'], + Pauli_ops['Y'], + Pauli_ops['Z']] + choi_state = np.zeros([dim**2, dim**2], dtype='complex') + for i in range(dim**2): + for j in range(dim**2): + choi_state += 1/dim**2 * PTM[i,j] * np.kron(paulis[j].transpose(), paulis[i]) + return choi_state + 
+def _get_pauli_transfer_matrix(Pauli_0, Pauli_1, + Pauli_p, Pauli_m, + Pauli_ip, Pauli_im, + M_in = None): + if type(M_in) == type(None): + M_in = np.array([[1, 0, 0, 1], # 0 + [1, 0, 0,-1], # 1 + [1, 1, 0, 0], # + + [1,-1, 0, 0], # - + [1, 0, 1, 0], # +i + [1, 0,-1, 0]]) # -i + M_in=np.transpose(M_in) + M_out= np.array([Pauli_0, + Pauli_1, + Pauli_p, + Pauli_m, + Pauli_ip, + Pauli_im]) + M_out=np.transpose(M_out) + R = np.matmul(M_out, np.linalg.pinv(M_in)) + # Check for physicality + choi_state = _get_choi_from_PTM(R) + if (np.real(np.linalg.eig(choi_state)[0]) < 0).any(): + print('PTM is unphysical') + else: + print('PTM is physical') + return R + +class Gate_process_tomo_Analysis(ba.BaseDataAnalysis): + """ + Analysis for LRU process tomography experiment. + """ + def __init__(self, + qubit: str, + f_state: bool = True, + post_select_2state: bool = False, + t_start: str = None, + t_stop: str = None, + label: str = '', + options_dict: dict = None, + extract_only: bool = False, + auto=True + ): + + super().__init__(t_start=t_start, t_stop=t_stop, + label=label, + options_dict=options_dict, + extract_only=extract_only) + + self.qubit = qubit + self.f_state = f_state + self.post_select_2state = post_select_2state + if auto: + self.run_analysis() + + def extract_data(self): + self.get_timestamps() + self.timestamp = self.timestamps[0] + data_fp = get_datafilepath_from_timestamp(self.timestamp) + param_spec = {'data': ('Experimental Data/Data', 'dset'), + 'value_names': ('Experimental Data', 'attr:value_names')} + self.raw_data_dict = h5d.extract_pars_from_datafile( + data_fp, param_spec) + # Parts added to be compatible with base analysis data requirements + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_fp)[0] + + def process_data(self): + _cycle = 2*18+2 + if self.f_state: + _cycle += 1 + ################################ + # Rotate shots in IQ plane + ################################ + # Sort shots + 
_raw_shots = self.raw_data_dict['data'][:,1:] + _shots_0 = _raw_shots[36::_cycle] + _shots_1 = _raw_shots[37::_cycle] + if self.f_state: + _shots_2 = _raw_shots[38::_cycle] + # Rotate data + center_0 = np.array([np.mean(_shots_0[:,0]), np.mean(_shots_0[:,1])]) + center_1 = np.array([np.mean(_shots_1[:,0]), np.mean(_shots_1[:,1])]) + raw_shots = rotate_and_center_data(_raw_shots[:,0], _raw_shots[:,1], center_0, center_1) + Shots_0 = raw_shots[36::_cycle] + Shots_1 = raw_shots[37::_cycle] + self.proc_data_dict['shots_0_IQ'] = Shots_0 + self.proc_data_dict['shots_1_IQ'] = Shots_1 + if self.f_state: + Shots_2 = raw_shots[38::_cycle] + self.proc_data_dict['shots_2_IQ'] = Shots_2 + # Use classifier for data + data = np.concatenate((Shots_0, Shots_1, Shots_2)) + labels = [0 for s in Shots_0]+[1 for s in Shots_1]+[2 for s in Shots_2] + from sklearn.discriminant_analysis import LinearDiscriminantAnalysis + clf = LinearDiscriminantAnalysis() + clf.fit(data, labels) + dec_bounds = _decision_boundary_points(clf.coef_, clf.intercept_) + Fid_dict = {} + for state, shots in zip([ '0', '1', '2'], + [Shots_0, Shots_1, Shots_2]): + _res = clf.predict(shots) + _fid = np.mean(_res == int(state)) + Fid_dict[state] = _fid + Fid_dict['avg'] = np.mean([f for f in Fid_dict.values()]) + # Get assignment fidelity matrix + M = np.zeros((3,3)) + for i, shots in enumerate([Shots_0, Shots_1, Shots_2]): + for j, state in enumerate(['0', '1', '2']): + _res = clf.predict(shots) + M[i][j] = np.mean(_res == int(state)) + self.proc_data_dict['classifier'] = clf + self.proc_data_dict['dec_bounds'] = dec_bounds + self.proc_data_dict['Fid_dict'] = Fid_dict + self.qoi = {} + self.qoi['Fid_dict'] = Fid_dict + self.qoi['Assignment_matrix'] = M + ######################################### + # Project data along axis perpendicular + # to the decision boundaries. + ######################################### + ############################ + # Projection along 01 axis. 
+ ############################ + # Rotate shots over 01 axis + shots_0 = rotate_and_center_data(Shots_0[:,0],Shots_0[:,1],dec_bounds['mean'], dec_bounds['01'], phi=np.pi/2) + shots_1 = rotate_and_center_data(Shots_1[:,0],Shots_1[:,1],dec_bounds['mean'], dec_bounds['01'], phi=np.pi/2) + # Take relevant quadrature + shots_0 = shots_0[:,0] + shots_1 = shots_1[:,0] + n_shots_1 = len(shots_1) + # find range + _all_shots = np.concatenate((shots_0, shots_1)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x0, n0 = np.unique(shots_0, return_counts=True) + x1, n1 = np.unique(shots_1, return_counts=True) + Fid_01, threshold_01 = _calculate_fid_and_threshold(x0, n0, x1, n1) + # Histogram of shots for 0 and 1 + h0, bin_edges = np.histogram(shots_0, bins=100, range=_range) + h1, bin_edges = np.histogram(shots_1, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt0, popt1, params_01 = _fit_double_gauss(bin_centers, h0, h1) + # Save processed data + self.proc_data_dict['projection_01'] = {} + self.proc_data_dict['projection_01']['h0'] = h0 + self.proc_data_dict['projection_01']['h1'] = h1 + self.proc_data_dict['projection_01']['bin_centers'] = bin_centers + self.proc_data_dict['projection_01']['popt0'] = popt0 + self.proc_data_dict['projection_01']['popt1'] = popt1 + self.proc_data_dict['projection_01']['SNR'] = params_01['SNR'] + self.proc_data_dict['projection_01']['Fid'] = Fid_01 + self.proc_data_dict['projection_01']['threshold'] = threshold_01 + ############################ + # Projection along 12 axis.
+ ############################ + # Rotate shots over 12 axis + shots_1 = rotate_and_center_data(Shots_1[:,0],Shots_1[:,1],dec_bounds['mean'], dec_bounds['12'], phi=np.pi/2) + shots_2 = rotate_and_center_data(Shots_2[:,0],Shots_2[:,1],dec_bounds['mean'], dec_bounds['12'], phi=np.pi/2) + # Take relavant quadrature + shots_1 = shots_1[:,0] + shots_2 = shots_2[:,0] + n_shots_2 = len(shots_2) + # find range + _all_shots = np.concatenate((shots_1, shots_2)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x1, n1 = np.unique(shots_1, return_counts=True) + x2, n2 = np.unique(shots_2, return_counts=True) + Fid_12, threshold_12 = _calculate_fid_and_threshold(x1, n1, x2, n2) + # Histogram of shots for 1 and 2 + h1, bin_edges = np.histogram(shots_1, bins=100, range=_range) + h2, bin_edges = np.histogram(shots_2, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt1, popt2, params_12 = _fit_double_gauss(bin_centers, h1, h2) + # Save processed data + self.proc_data_dict['projection_12'] = {} + self.proc_data_dict['projection_12']['h1'] = h1 + self.proc_data_dict['projection_12']['h2'] = h2 + self.proc_data_dict['projection_12']['bin_centers'] = bin_centers + self.proc_data_dict['projection_12']['popt1'] = popt1 + self.proc_data_dict['projection_12']['popt2'] = popt2 + self.proc_data_dict['projection_12']['SNR'] = params_12['SNR'] + self.proc_data_dict['projection_12']['Fid'] = Fid_12 + self.proc_data_dict['projection_12']['threshold'] = threshold_12 + ############################ + # Projection along 02 axis. 
+ ############################ + # Rotate shots over 02 axis + shots_0 = rotate_and_center_data(Shots_0[:,0],Shots_0[:,1],dec_bounds['mean'],dec_bounds['02'], phi=np.pi/2) + shots_2 = rotate_and_center_data(Shots_2[:,0],Shots_2[:,1],dec_bounds['mean'],dec_bounds['02'], phi=np.pi/2) + # Take relavant quadrature + shots_0 = shots_0[:,0] + shots_2 = shots_2[:,0] + n_shots_2 = len(shots_2) + # find range + _all_shots = np.concatenate((shots_0, shots_2)) + _range = (np.min(_all_shots), np.max(_all_shots)) + # Sort shots in unique values + x0, n0 = np.unique(shots_0, return_counts=True) + x2, n2 = np.unique(shots_2, return_counts=True) + Fid_02, threshold_02 = _calculate_fid_and_threshold(x0, n0, x2, n2) + # Histogram of shots for 1 and 2 + h0, bin_edges = np.histogram(shots_0, bins=100, range=_range) + h2, bin_edges = np.histogram(shots_2, bins=100, range=_range) + bin_centers = (bin_edges[1:]+bin_edges[:-1])/2 + popt0, popt2, params_02 = _fit_double_gauss(bin_centers, h0, h2) + # Save processed data + self.proc_data_dict['projection_02'] = {} + self.proc_data_dict['projection_02']['h0'] = h0 + self.proc_data_dict['projection_02']['h2'] = h2 + self.proc_data_dict['projection_02']['bin_centers'] = bin_centers + self.proc_data_dict['projection_02']['popt0'] = popt0 + self.proc_data_dict['projection_02']['popt2'] = popt2 + self.proc_data_dict['projection_02']['SNR'] = params_02['SNR'] + self.proc_data_dict['projection_02']['Fid'] = Fid_02 + self.proc_data_dict['projection_02']['threshold'] = threshold_02 + ################################ + # Process tomo analysis + ################################ + _thresh = self.proc_data_dict['projection_01']['threshold'] + + if self.post_select_2state and self.f_state: + _dig_shots = clf.predict(raw_shots) + else: + _dig_shots = [0 if s < _thresh else 1 for s in raw_shots[:,0]] + # Beta matrix for readout corrections + cal_shots_dig = {self.qubit:{}} + cal_shots_dig[self.qubit]['0'] = [+1 if s < _thresh else -1 for s in Shots_0[:,0]] 
+ cal_shots_dig[self.qubit]['1'] = [+1 if s < _thresh else -1 for s in Shots_1[:,0]] + Beta_matrix = _get_Beta_matrix(cal_shots_dig, n=1) + # Calculate expectation values for each state + States = ['0', '1', '+', '-', '+i', '-i'] + Operators = ['Z', 'X', 'Y'] + # Parse tomography shots and post-select leakage + dig_shots_idle = {} + dig_shots_gate = {} + leak_frac_idle = 0 + leak_frac_gate = 0 + for i, state in enumerate(States): + dig_shots_idle[state] = {} + dig_shots_gate[state] = {} + for j, op in enumerate(Operators): + _shot_list_idle = _dig_shots[i*3+j::_cycle] + _shot_list_gate = _dig_shots[18+i*3+j::_cycle] + # Post-select on leakage + _shot_list_idle = [ s for s in _shot_list_idle if s!=2 ] + _leak_frac_idle = 1-len(_shot_list_idle)/len(_dig_shots[i*3+j::_cycle]) + leak_frac_idle += _leak_frac_idle/18 + _shot_list_gate = [ s for s in _shot_list_gate if s!=2 ] + _leak_frac_gate = 1-len(_shot_list_gate)/len(_dig_shots[i*3+j::_cycle]) + leak_frac_gate += _leak_frac_gate/18 + # Turn in meas outcomes (+1/-1) + dig_shots_idle[state][op] = 1 - 2*np.array(_shot_list_idle) + dig_shots_gate[state][op] = 1 - 2*np.array(_shot_list_gate) + # Build density matrices and pauli vector for each input state + # Ideal states + Density_matrices_ideal = {} + Density_matrices_ideal['0'] = _gen_density_matrix(mx=0, my=0, mz=+1) + Density_matrices_ideal['1'] = _gen_density_matrix(mx=0, my=0, mz=-1) + Density_matrices_ideal['+'] = _gen_density_matrix(mx=+1, my=0, mz=0) + Density_matrices_ideal['-'] = _gen_density_matrix(mx=-1, my=0, mz=0) + Density_matrices_ideal['+i'] = _gen_density_matrix(mx=0, my=+1, mz=0) + Density_matrices_ideal['-i'] = _gen_density_matrix(mx=0, my=-1, mz=0) + Density_matrices_idle = {} + Density_matrices_gate = {} + Pauli_vectors_idle = {} + Pauli_vectors_gate = {} + for state in States: + # Idle + mz = np.mean(dig_shots_idle[state]['Z']) + mx = np.mean(dig_shots_idle[state]['X']) + my = np.mean(dig_shots_idle[state]['Y']) + p_vector = np.array([1, mx, my, 
mz]) + Pauli_vectors_idle[state] = _correct_pauli_vector(Beta_matrix, p_vector) + rho = _gen_density_matrix(*Pauli_vectors_idle[state][1:]) + Density_matrices_idle[state] = rho + # Gate + mz = np.mean(dig_shots_gate[state]['Z']) + mx = np.mean(dig_shots_gate[state]['X']) + my = np.mean(dig_shots_gate[state]['Y']) + p_vector = np.array([1, mx, my, mz]) + Pauli_vectors_gate[state] = _correct_pauli_vector(Beta_matrix, p_vector) + rho = _gen_density_matrix(*Pauli_vectors_gate[state][1:]) + Density_matrices_gate[state] = rho + # Get PTM + PTM_idle = _get_pauli_transfer_matrix( + Pauli_0 = Pauli_vectors_idle['0'], + Pauli_1 = Pauli_vectors_idle['1'], + Pauli_p = Pauli_vectors_idle['+'], + Pauli_m = Pauli_vectors_idle['-'], + Pauli_ip = Pauli_vectors_idle['+i'], + Pauli_im = Pauli_vectors_idle['-i']) + PTM_gate = _get_pauli_transfer_matrix( + Pauli_0 = Pauli_vectors_gate['0'], + Pauli_1 = Pauli_vectors_gate['1'], + Pauli_p = Pauli_vectors_gate['+'], + Pauli_m = Pauli_vectors_gate['-'], + Pauli_ip = Pauli_vectors_gate['+i'], + Pauli_im = Pauli_vectors_gate['-i']) + # Calculate angle of rotation + def _get_PTM_angles(PTM): + angle_xy = np.arctan2(PTM[1,2], PTM[1,1])*180/np.pi + angle_xz = np.arctan2(PTM[1,3], PTM[1,1])*180/np.pi + angle_yz = np.arctan2(PTM[2,3], PTM[2,2])*180/np.pi + # angle_xy = np.degrees(np.arcsin(PTM[2,1])) + # angle_xz = np.degrees(np.arcsin(PTM[1,3])) + # angle_yz = np.degrees(np.arcsin(PTM[3,2])) + angle_dict = {'xy':angle_xy, 'xz':angle_xz, 'yz':angle_yz} + return angle_dict + Angle_dict_idle = _get_PTM_angles(PTM_idle) + Angle_dict_gate = _get_PTM_angles(PTM_gate) + # get ideal PTM with same angle and + # calculate fidelity of extracted PTM. 
+ PTM_id = np.eye(4) + F_PTM_idle = _PTM_fidelity(PTM_idle, PTM_id) + F_PTM_gate = _PTM_fidelity(PTM_gate, PTM_id) + # PTM_id = _PTM_angle(angle_p) + # F_PTM_rotated = _PTM_fidelity(PTM, PTM_id) + # self.proc_data_dict['PS_frac'] = leak_frac + self.proc_data_dict['Density_matrices_ideal'] = Density_matrices_ideal + self.proc_data_dict['Density_matrices_idle'] = Density_matrices_idle + self.proc_data_dict['Density_matrices_gate'] = Density_matrices_gate + self.proc_data_dict['PTM_idle'] = PTM_idle + self.proc_data_dict['F_PTM_idle'] = F_PTM_idle + self.proc_data_dict['F_PTM_gate'] = F_PTM_gate + self.proc_data_dict['PTM_gate'] = PTM_gate + self.proc_data_dict['PTM_angles_idle'] = Angle_dict_idle + self.proc_data_dict['PTM_angles_gate'] = Angle_dict_gate + + def prepare_plots(self): + self.axs_dict = {} + fig = plt.figure(figsize=(8,4), dpi=100) + axs = [fig.add_subplot(121), + fig.add_subplot(322), + fig.add_subplot(324), + fig.add_subplot(326)] + # fig.patch.set_alpha(0) + self.axs_dict['SSRO_plot'] = axs[0] + self.figs['SSRO_plot'] = fig + self.plot_dicts['SSRO_plot'] = { + 'plotfn': ssro_IQ_projection_plotfn, + 'ax_id': 'SSRO_plot', + 'shots_0': self.proc_data_dict['shots_0_IQ'], + 'shots_1': self.proc_data_dict['shots_1_IQ'], + 'shots_2': self.proc_data_dict['shots_2_IQ'], + 'projection_01': self.proc_data_dict['projection_01'], + 'projection_12': self.proc_data_dict['projection_12'], + 'projection_02': self.proc_data_dict['projection_02'], + 'classifier': self.proc_data_dict['classifier'], + 'dec_bounds': self.proc_data_dict['dec_bounds'], + 'Fid_dict': self.proc_data_dict['Fid_dict'], + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + fig, ax = plt.subplots(figsize=(3,3), dpi=100) + # fig.patch.set_alpha(0) + self.axs_dict['Assignment_matrix'] = ax + self.figs['Assignment_matrix'] = fig + self.plot_dicts['Assignment_matrix'] = { + 'plotfn': assignment_matrix_plotfn, + 'ax_id': 'Assignment_matrix', + 'M': self.qoi['Assignment_matrix'], + 'qubit': 
self.qubit, + 'timestamp': self.timestamp + } + fig = plt.figure(figsize=(3*1.5, 6*1.5), dpi=200) + axs =[] + for i in range(6): + axs.append(fig.add_subplot(3, 2, i+1 , projection='3d', azim=-35, elev=35)) + # fig.patch.set_alpha(0) + self.axs_dict['Density_matrices_idle'] = axs[0] + self.figs['Density_matrices_idle'] = fig + self.plot_dicts['Density_matrices_idle'] = { + 'plotfn': density_matrices_plotfn, + 'ax_id': 'Density_matrices_idle', + 'Density_matrices': self.proc_data_dict['Density_matrices_idle'], + 'Density_matrices_ideal': self.proc_data_dict['Density_matrices_ideal'], + 'title': 'Idle density matrices', + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + fig = plt.figure(figsize=(3*1.5, 6*1.5), dpi=200) + axs =[] + for i in range(6): + axs.append(fig.add_subplot(3, 2, i+1 , projection='3d', azim=-35, elev=35)) + # fig.patch.set_alpha(0) + self.axs_dict['Density_matrices_gate'] = axs[0] + self.figs['Density_matrices_gate'] = fig + self.plot_dicts['Density_matrices_gate'] = { + 'plotfn': density_matrices_plotfn, + 'ax_id': 'Density_matrices_gate', + 'Density_matrices': self.proc_data_dict['Density_matrices_gate'], + 'Density_matrices_ideal': self.proc_data_dict['Density_matrices_idle'], + 'title': 'Gate density matrices', + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + fig, ax = plt.subplots(figsize=(3.5,3.5), dpi=100) + # fig.patch.set_alpha(0) + self.axs_dict['Pauli_transfer_matrix_Idle'] = ax + self.figs['Pauli_transfer_matrix_Idle'] = fig + self.plot_dicts['Pauli_transfer_matrix_Idle'] = { + 'plotfn': PTM_plotfn, + 'ax_id': 'Pauli_transfer_matrix_Idle', + 'R': self.proc_data_dict['PTM_idle'], + 'R_angles': self.proc_data_dict['PTM_angles_idle'], + 'F_PTM_gate':self.proc_data_dict['F_PTM_gate'], + 'F_PTM_idle':self.proc_data_dict['F_PTM_idle'], + 'title': f'PTM of qubit {self.qubit} after idle', + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + fig, ax = plt.subplots(figsize=(3.5,3.5), dpi=100) + # fig.patch.set_alpha(0) + 
self.axs_dict['Pauli_transfer_matrix_Gate'] = ax + self.figs['Pauli_transfer_matrix_Gate'] = fig + self.plot_dicts['Pauli_transfer_matrix_Gate'] = { + 'plotfn': PTM_plotfn, + 'ax_id': 'Pauli_transfer_matrix_Gate', + 'R': self.proc_data_dict['PTM_gate'], + 'R_angles': self.proc_data_dict['PTM_angles_gate'], + 'F_PTM_gate':self.proc_data_dict['F_PTM_gate'], + 'F_PTM_idle':self.proc_data_dict['F_PTM_idle'], + 'title': f'PTM of qubit {self.qubit} after gate', + 'qubit': self.qubit, + 'timestamp': self.timestamp + } + + def run_post_extract(self): + self.prepare_plots() # specify default plots + self.plot(key_list='auto', axs_dict=self.axs_dict) # make the plots + if self.options_dict.get('save_figs', False): + self.save_figures( + close_figs=self.options_dict.get('close_figs', True), + tag_tstamp=self.options_dict.get('tag_tstamp', True)) + +def ssro_IQ_projection_plotfn( + shots_0, + shots_1, + shots_2, + projection_01, + projection_12, + projection_02, + classifier, + dec_bounds, + Fid_dict, + timestamp, + qubit, + ax, **kw): + fig = ax.get_figure() + axs = fig.get_axes() + # Fit 2D gaussians + from scipy.optimize import curve_fit + def twoD_Gaussian(data, amplitude, x0, y0, sigma_x, sigma_y, theta): + x, y = data + x0 = float(x0) + y0 = float(y0) + a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2) + b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2) + c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2) + g = amplitude*np.exp( - (a*((x-x0)**2) + 2*b*(x-x0)*(y-y0) + + c*((y-y0)**2))) + return g.ravel() + def _fit_2D_gaussian(X, Y): + counts, _x, _y = np.histogram2d(X, Y, bins=[100, 100], density=True) + x = (_x[:-1] + _x[1:]) / 2 + y = (_y[:-1] + _y[1:]) / 2 + _x, _y = np.meshgrid(_x, _y) + x, y = np.meshgrid(x, y) + p0 = [counts.max(), np.mean(X), np.mean(Y), .2, .2, 0] + popt, pcov = curve_fit(twoD_Gaussian, (x, y), counts.T.ravel(), p0=p0) + return popt + popt_0 = _fit_2D_gaussian(shots_0[:,0], 
shots_0[:,1]) + popt_1 = _fit_2D_gaussian(shots_1[:,0], shots_1[:,1]) + popt_2 = _fit_2D_gaussian(shots_2[:,0], shots_2[:,1]) + # Plot stuff + axs[0].plot(shots_0[:,0], shots_0[:,1], '.', color='C0', alpha=0.05) + axs[0].plot(shots_1[:,0], shots_1[:,1], '.', color='C3', alpha=0.05) + axs[0].plot(shots_2[:,0], shots_2[:,1], '.', color='C2', alpha=0.05) + axs[0].plot([0, popt_0[1]], [0, popt_0[2]], '--', color='k', lw=.5) + axs[0].plot([0, popt_1[1]], [0, popt_1[2]], '--', color='k', lw=.5) + axs[0].plot([0, popt_2[1]], [0, popt_2[2]], '--', color='k', lw=.5) + axs[0].plot(popt_0[1], popt_0[2], '.', color='C0', label='ground') + axs[0].plot(popt_1[1], popt_1[2], '.', color='C3', label='excited') + axs[0].plot(popt_2[1], popt_2[2], '.', color='C2', label='$2^\mathrm{nd}$ excited') + axs[0].plot(popt_0[1], popt_0[2], 'x', color='white') + axs[0].plot(popt_1[1], popt_1[2], 'x', color='white') + axs[0].plot(popt_2[1], popt_2[2], 'x', color='white') + # Draw 4sigma ellipse around mean + from matplotlib.patches import Ellipse + circle_0 = Ellipse((popt_0[1], popt_0[2]), + width=4*popt_0[3], height=4*popt_0[4], + angle=-popt_0[5]*180/np.pi, + ec='white', fc='none', ls='--', lw=1.25, zorder=10) + axs[0].add_patch(circle_0) + circle_1 = Ellipse((popt_1[1], popt_1[2]), + width=4*popt_1[3], height=4*popt_1[4], + angle=-popt_1[5]*180/np.pi, + ec='white', fc='none', ls='--', lw=1.25, zorder=10) + axs[0].add_patch(circle_1) + circle_2 = Ellipse((popt_2[1], popt_2[2]), + width=4*popt_2[3], height=4*popt_2[4], + angle=-popt_2[5]*180/np.pi, + ec='white', fc='none', ls='--', lw=1.25, zorder=10) + axs[0].add_patch(circle_2) + # Plot classifier zones + from matplotlib.patches import Polygon + _all_shots = np.concatenate((shots_0, shots_1)) + _lim = np.max([ np.max(np.abs(_all_shots[:,0]))*1.1, np.max(np.abs(_all_shots[:,1]))*1.1 ]) + Lim_points = {} + for bound in ['01', '12', '02']: + dec_bounds['mean'] + _x0, _y0 = dec_bounds['mean'] + _x1, _y1 = dec_bounds[bound] + a = 
(_y1-_y0)/(_x1-_x0) + b = _y0 - a*_x0 + _xlim = 1e2*np.sign(_x1-_x0) + _ylim = a*_xlim + b + Lim_points[bound] = _xlim, _ylim + # Plot 0 area + _points = [dec_bounds['mean'], Lim_points['01'], Lim_points['02']] + _patch = Polygon(_points, color='C0', alpha=0.2, lw=0) + axs[0].add_patch(_patch) + # Plot 1 area + _points = [dec_bounds['mean'], Lim_points['01'], Lim_points['12']] + _patch = Polygon(_points, color='C3', alpha=0.2, lw=0) + axs[0].add_patch(_patch) + # Plot 2 area + _points = [dec_bounds['mean'], Lim_points['02'], Lim_points['12']] + _patch = Polygon(_points, color='C2', alpha=0.2, lw=0) + axs[0].add_patch(_patch) + # Plot decision boundary + for bound in ['01', '12', '02']: + _x0, _y0 = dec_bounds['mean'] + _x1, _y1 = Lim_points[bound] + axs[0].plot([_x0, _x1], [_y0, _y1], 'k--', lw=1) + axs[0].set_xlim(-_lim, _lim) + axs[0].set_ylim(-_lim, _lim) + axs[0].legend(frameon=False) + axs[0].set_xlabel('Integrated voltage I') + axs[0].set_ylabel('Integrated voltage Q') + axs[0].set_title(f'IQ plot qubit {qubit}') + fig.suptitle(f'{timestamp}\n') + ########################## + # Plot projections + ########################## + # 01 projection + _bin_c = projection_01['bin_centers'] + bin_width = _bin_c[1]-_bin_c[0] + axs[1].bar(_bin_c, projection_01['h0'], bin_width, fc='C0', alpha=0.4) + axs[1].bar(_bin_c, projection_01['h1'], bin_width, fc='C3', alpha=0.4) + axs[1].plot(_bin_c, double_gauss(_bin_c, *projection_01['popt0']), '-C0') + axs[1].plot(_bin_c, double_gauss(_bin_c, *projection_01['popt1']), '-C3') + axs[1].axvline(projection_01['threshold'], ls='--', color='k', lw=1) + text = '\n'.join((f'Fid. 
: {projection_01["Fid"]*100:.1f}%', + f'SNR : {projection_01["SNR"]:.1f}')) + props = dict(boxstyle='round', facecolor='gray', alpha=0) + axs[1].text(.775, .9, text, transform=axs[1].transAxes, + verticalalignment='top', bbox=props, fontsize=7) + axs[1].text(projection_01['popt0'][0], projection_01['popt0'][4]/2, + r'$|g\rangle$', ha='center', va='center', color='C0') + axs[1].text(projection_01['popt1'][0], projection_01['popt1'][4]/2, + r'$|e\rangle$', ha='center', va='center', color='C3') + axs[1].set_xticklabels([]) + axs[1].set_xlim(_bin_c[0], _bin_c[-1]) + axs[1].set_ylim(bottom=0) + axs[1].set_title('Projection of data') + # 12 projection + _bin_c = projection_12['bin_centers'] + bin_width = _bin_c[1]-_bin_c[0] + axs[2].bar(_bin_c, projection_12['h1'], bin_width, fc='C3', alpha=0.4) + axs[2].bar(_bin_c, projection_12['h2'], bin_width, fc='C2', alpha=0.4) + axs[2].plot(_bin_c, double_gauss(_bin_c, *projection_12['popt1']), '-C3') + axs[2].plot(_bin_c, double_gauss(_bin_c, *projection_12['popt2']), '-C2') + axs[2].axvline(projection_12['threshold'], ls='--', color='k', lw=1) + text = '\n'.join((f'Fid. 
: {projection_12["Fid"]*100:.1f}%', + f'SNR : {projection_12["SNR"]:.1f}')) + props = dict(boxstyle='round', facecolor='gray', alpha=0) + axs[2].text(.775, .9, text, transform=axs[2].transAxes, + verticalalignment='top', bbox=props, fontsize=7) + axs[2].text(projection_12['popt1'][0], projection_12['popt1'][4]/2, + r'$|e\rangle$', ha='center', va='center', color='C3') + axs[2].text(projection_12['popt2'][0], projection_12['popt2'][4]/2, + r'$|f\rangle$', ha='center', va='center', color='C2') + axs[2].set_xticklabels([]) + axs[2].set_xlim(_bin_c[0], _bin_c[-1]) + axs[2].set_ylim(bottom=0) + # 02 projection + _bin_c = projection_02['bin_centers'] + bin_width = _bin_c[1]-_bin_c[0] + axs[3].bar(_bin_c, projection_02['h0'], bin_width, fc='C0', alpha=0.4) + axs[3].bar(_bin_c, projection_02['h2'], bin_width, fc='C2', alpha=0.4) + axs[3].plot(_bin_c, double_gauss(_bin_c, *projection_02['popt0']), '-C0') + axs[3].plot(_bin_c, double_gauss(_bin_c, *projection_02['popt2']), '-C2') + axs[3].axvline(projection_02['threshold'], ls='--', color='k', lw=1) + text = '\n'.join((f'Fid. 
: {projection_02["Fid"]*100:.1f}%', + f'SNR : {projection_02["SNR"]:.1f}')) + props = dict(boxstyle='round', facecolor='gray', alpha=0) + axs[3].text(.775, .9, text, transform=axs[3].transAxes, + verticalalignment='top', bbox=props, fontsize=7) + axs[3].text(projection_02['popt0'][0], projection_02['popt0'][4]/2, + r'$|g\rangle$', ha='center', va='center', color='C0') + axs[3].text(projection_02['popt2'][0], projection_02['popt2'][4]/2, + r'$|f\rangle$', ha='center', va='center', color='C2') + axs[3].set_xticklabels([]) + axs[3].set_xlim(_bin_c[0], _bin_c[-1]) + axs[3].set_ylim(bottom=0) + axs[3].set_xlabel('Integrated voltage') + # Write fidelity textbox + text = '\n'.join(('Assignment fidelity:', + f'$F_g$ : {Fid_dict["0"]*100:.1f}%', + f'$F_e$ : {Fid_dict["1"]*100:.1f}%', + f'$F_f$ : {Fid_dict["2"]*100:.1f}%', + f'$F_\mathrm{"{avg}"}$ : {Fid_dict["avg"]*100:.1f}%')) + props = dict(boxstyle='round', facecolor='gray', alpha=.2) + axs[1].text(1.05, 1, text, transform=axs[1].transAxes, + verticalalignment='top', bbox=props) + +def assignment_matrix_plotfn( + M, + qubit, + timestamp, + ax, **kw): + fig = ax.get_figure() + im = ax.imshow(M, cmap=plt.cm.Reds, vmin=0, vmax=1) + for i in range(3): + for j in range(3): + c = M[j,i] + if abs(c) > .5: + ax.text(i, j, '{:.2f}'.format(c), va='center', ha='center', + color = 'white') + else: + ax.text(i, j, '{:.2f}'.format(c), va='center', ha='center') + ax.set_xticks([0,1,2]) + ax.set_xticklabels([r'$|0\rangle$',r'$|1\rangle$',r'$|2\rangle$']) + ax.set_xlabel('Assigned state') + ax.set_yticks([0,1,2]) + ax.set_yticklabels([r'$|0\rangle$',r'$|1\rangle$',r'$|2\rangle$']) + ax.set_ylabel('Prepared state') + ax.set_title(f'{timestamp}\nQutrit assignment matrix qubit {qubit}') + cbar_ax = fig.add_axes([.95, .15, .03, .7]) + cb = fig.colorbar(im, cax=cbar_ax) + cb.set_label('assignment probability') + +def _plot_density_matrix(rho, f, ax, rho_ideal=None, state=None, cbar=True): + if state is None: + pass + else: + 
ax.set_title('Logical state '+state, pad=10, fontsize=5) + ax.set_xticks([-.1, .9]) + ax.set_yticks([-.1, .9]) + ax.set_xticklabels(['$0$', '$1$'], rotation=0, fontsize=4.5) + ax.set_yticklabels(['$0$', '$1$'], rotation=0, fontsize=4.5) + ax.tick_params(axis='x', which='major', pad=-6) + ax.tick_params(axis='y', which='major', pad=-7) + ax.tick_params(axis='z', which='major', pad=-4) + for tick in ax.yaxis.get_majorticklabels(): + tick.set_horizontalalignment("left") + ax.set_zticks(np.linspace(0, 1, 3)) + ax.set_zticklabels(['0', '0.5', '1'], fontsize=4) + ax.set_zlim(0, 1) + + xedges = np.arange(-.75, 2, 1) + yedges = np.arange(-.75, 2, 1) + xpos, ypos = np.meshgrid(xedges[:-1] + 0.25, yedges[:-1] + 0.25, indexing="ij") + xpos = xpos.ravel() + ypos = ypos.ravel() + zpos = 0 + dx = dy = .8 + dz = np.abs(rho).ravel() + + cmap = matplotlib.colors.LinearSegmentedColormap.from_list("", ["C3",'darkseagreen',"C0", + 'antiquewhite',"C3"]) + norm = matplotlib.colors.Normalize(vmin=-np.pi, vmax=np.pi) + color=cmap(norm([np.angle(e) for e in rho.ravel()])) + ax.bar3d(xpos, ypos, zpos, dx, dy, dz, zsort='max', + color=color, alpha=1 , edgecolor='black', linewidth=.1) + if rho_ideal is not None: + dz1 = np.abs(rho_ideal).ravel() + color1=cmap(norm([np.angle(e) for e in rho_ideal.ravel()])) + # selector + s = [k for k in range(len(dz1)) if dz1[k] > .15] + ax.bar3d(xpos[s], ypos[s], dz[s], dx, dy, dz=dz1[s]-dz[s], zsort='min', + color=color1[s], alpha=.25, edgecolor='black', linewidth=.4) + sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm) + if cbar: + cb = f.colorbar(sm) + cb.set_ticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi]) + cb.set_ticklabels(['$-\pi$', '$-\pi/2$', '0', '$\pi/2$', '$\pi$']) + cb.set_label('arg', fontsize=3) + cb.ax.tick_params(labelsize=3) + cb.outline.set_linewidth(.5) + cb.ax.tick_params(labelsize=3, width=.5, length=1, pad=1) + f.axes[-1].set_position([.85, .2, .05, .6]) + f.axes[-1].get_yaxis().labelpad=-4 + ax.set_zlabel(r'$|\rho|$', fontsize=5, 
labelpad=-51) + +def density_matrices_plotfn( + Density_matrices, + Density_matrices_ideal, + timestamp, + qubit, + ax, + title=None, + **kw): + fig = ax.get_figure() + axs = fig.get_axes() + + States = [s for s in Density_matrices.keys()] + for i, state in enumerate(States): + axs[i].axes.axes.set_position((.2+(i//2)*.225, .43-(i%2)*.1, .15*1.5, .075*1.1)) + _plot_density_matrix(Density_matrices[state], fig, axs[i], state=None, cbar=False, + rho_ideal=Density_matrices_ideal[state]) + axs[i].set_title(r'Input state $|{}\rangle$'.format(state), fontsize=5, pad=-5) + axs[i].patch.set_visible(False) + # vertical colorbar + from mpl_toolkits.axes_grid1.inset_locator import inset_axes + cbar_ax = fig.add_axes([.9, .341, .01, .15]) + cmap = matplotlib.colors.LinearSegmentedColormap.from_list('', ['C3','darkseagreen','C0','antiquewhite','C3']) + norm = matplotlib.colors.Normalize(vmin=-np.pi, vmax=np.pi) + sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm) + cb = fig.colorbar(sm, cax=cbar_ax, orientation='vertical') + cb.set_ticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi]) + cb.set_ticklabels(['$-\pi$', '$-\pi/2$', '0', '$\pi/2$', '$\pi$']) + cb.set_label(r'arg($\rho$)', fontsize=5, labelpad=-8) + cb.ax.tick_params(labelsize=6) + cb.outline.set_linewidth(.5) + cb.ax.tick_params(labelsize=5, width=.5, length=2, pad=3) + _title = timestamp + if title: + _title += '\n'+title + fig.suptitle(_title, y=.55, x=.55, size=6.5) + +def PTM_plotfn( + R, + R_angles, + F_PTM_gate, + F_PTM_idle, + timestamp, + qubit, + ax, + title=None, + **kw): + im = ax.imshow(R, cmap=plt.cm.PiYG, vmin=-1, vmax=1) + ax.set_xticks(np.arange(4)) + ax.set_yticks(np.arange(4)) + ax.set_xticklabels([ '$I$', '$X$', '$Y$', '$Z$']) + ax.set_yticklabels([ '$I$', '$X$', '$Y$', '$Z$']) + for i in range(4): + for j in range(4): + c = R[j,i] + if abs(c) > .5: + ax.text(i, j, '{:.2f}'.format(c), va='center', ha='center', + color = 'white') + else: + ax.text(i, j, '{:.2f}'.format(c), va='center', ha='center') + _title = 
f'{timestamp}' + if title: + _title += '\n'+title + ax.set_title(_title, pad=4) + ax.set_xlabel('input Pauli operator') + ax.set_ylabel('Output Pauli operator') + ax.axes.tick_params(width=.5, length=2) + ax.spines['left'].set_linewidth(.5) + ax.spines['right'].set_linewidth(.5) + ax.spines['top'].set_linewidth(.5) + ax.spines['bottom'].set_linewidth(.5) + if R_angles: + text = '\n'.join(('PTM rotation angles', + '$\phi_{xy}=$'+f'{R_angles["xy"]:.1f} deg.', + '$\phi_{xz}=$'+f'{R_angles["xz"]:.1f} deg.', + '$\phi_{yz}=$'+f'{R_angles["yz"]:.1f} deg.',)) + # f'$F_\mathrm{"{idle}"}=$'+f'{F_PTM_idle*100:.1f} %.', + # f'$F_\mathrm{"{gate}"}=$'+f'{F_PTM_gate*100:.1f} %.',)) + props = dict(boxstyle='round', facecolor='white', alpha=1) + ax.text(1.05, 1., text, transform=ax.transAxes, + verticalalignment='top', bbox=props) diff --git a/pycqed/instrument_drivers/library/Transport.py b/pycqed/instrument_drivers/library/Transport.py index 9d1bfac72c..029ef18351 100644 --- a/pycqed/instrument_drivers/library/Transport.py +++ b/pycqed/instrument_drivers/library/Transport.py @@ -44,7 +44,7 @@ class IPTransport(Transport): def __init__(self, host: str, port: int = 5025, - timeout = 40.0, + timeout = 60.0, snd_buf_size: int = 512 * 1024) -> None: """ establish connection, e.g. 
IPTransport('192.168.0.16', 4000) diff --git a/pycqed/instrument_drivers/meta_instrument/LutMans/flux_lutman_vcz.py b/pycqed/instrument_drivers/meta_instrument/LutMans/flux_lutman_vcz.py index 1e9c9f866e..334df04392 100644 --- a/pycqed/instrument_drivers/meta_instrument/LutMans/flux_lutman_vcz.py +++ b/pycqed/instrument_drivers/meta_instrument/LutMans/flux_lutman_vcz.py @@ -54,7 +54,7 @@ 4: {"name": "cz_NW", "type": "idle_z", "which": "NW"}, 5: {"name": "park", "type": "square"}, 6: {"name": "square", "type": "square"}, - 7: {"name": "custom_wf", "type": "custom"}, + 7: {"name": "cz_aux", "type": "cz", "which": "aux"}, } @@ -132,6 +132,36 @@ def render_wave( class HDAWG_Flux_LutMan(Base_Flux_LutMan): + + # region Class Properties + @property + def total_length(self) -> int: + """:return: Total number of sample points dedicated to waveform.""" + return self.total_park_length + self.total_pad_length + + @property + def total_park_length(self) -> int: + """:return: Total number of sample points dedicated to parking.""" + return int(np.round(self.sampling_rate() * self.park_length())) + + @property + def total_pad_length(self) -> int: + """:return: Total number of sample points dedicated to waveform padding.""" + return int(np.round(self.sampling_rate() * self.park_pad_length() * 2)) + + @property + def first_pad_length(self) -> int: + """:return: Number of sample points in first pad-'arm' of two-sided parking.""" + equal_split: int = int(np.round(self.sampling_rate() * self.park_pad_length())) + minimum_padding: int = 1 + return max(min(equal_split + self.park_pad_symmetry_offset(), self.total_pad_length - minimum_padding), minimum_padding) + + @property + def second_pad_length(self) -> int: + """:return: Number of sample points in second pad-'arm' of two-sided parking.""" + return self.total_pad_length - self.first_pad_length + # endregion + def __init__(self, name, **kw): super().__init__(name, **kw) self._wave_dict_dist = dict() @@ -190,18 +220,33 @@ def 
_gen_square(self): length=self.sq_length(), sampling_rate=self.sampling_rate(), delay=self.sq_delay(), + gauss_sigma=self.sq_gauss_sigma(), ) def _gen_park(self): + zeros = np.zeros(int(self.park_pad_length() * self.sampling_rate())) + # Padding + first_zeros: np.ndarray = np.zeros(shape=self.first_pad_length) + second_zeros: np.ndarray = np.zeros(shape=self.second_pad_length) + if self.park_double_sided(): ones = np.ones(int(self.park_length() * self.sampling_rate() / 2)) - zeros = np.zeros(int(self.park_pad_length() * self.sampling_rate())) pulse_pos = self.park_amp() * ones - return np.concatenate((zeros, pulse_pos, - pulse_pos, zeros)) + return np.concatenate(( + first_zeros, + +1 * pulse_pos, + -1 * pulse_pos, + second_zeros, + )) else: - return self.park_amp() * np.ones( + pulse_pos = self.park_amp() * np.ones( int(self.park_length() * self.sampling_rate()) ) + return np.concatenate(( + first_zeros, + pulse_pos, + second_zeros, + )) def _add_qubit_parameters(self): """ @@ -288,7 +333,7 @@ def _add_waveform_parameters(self): "vcz_waveform": wf_vcz.vcz_waveform } - for this_cz in ["NE", "NW", "SW", "SE"]: + for this_cz in ["NE", "NW", "SW", "SE", "aux"]: self.add_parameter( "cz_wf_generator_%s" % this_cz, initial_value="vcz_waveform", @@ -313,7 +358,17 @@ def _add_waveform_parameters(self): unit="s", label="Parking pulse padding duration (single-sided)", initial_value=0, - vals=vals.Numbers(0, 20e-9), + vals=vals.Numbers(0, 100e-6), + parameter_class=ManualParameter, + ) + self.add_parameter( + "park_pad_symmetry_offset", + unit="samples", + label="Parking pulse padding samling point offset.\ + Applies offset to sampling points in initial padding (additive) and final padding (subtractive).\ + The offset is bounded such that the padding is minimal 1# and maximal total_padding - 1#.", + initial_value=0, + vals=vals.Numbers(-100, 100), parameter_class=ManualParameter, ) self.add_parameter( @@ -359,6 +414,14 @@ def _add_waveform_parameters(self): 
vals=vals.Numbers(0, 100e-6), parameter_class=ManualParameter, ) + self.add_parameter( + "sq_gauss_sigma", + unit="s", + label="Sigma for gaussian filter", + initial_value=0e-9, + vals=vals.Numbers(0, 100e-6), + parameter_class=ManualParameter, + ) # CODEWORD 7: CUSTOM @@ -407,7 +470,7 @@ def calc_amp_to_eps( state_A: str = "01", state_B: str = "02", which_gate: str = "NE", - ): + ): """ Calculates detuning between two levels as a function of pulse amplitude in Volt. @@ -441,7 +504,7 @@ def calc_eps_to_dac( state_B: str = "02", which_gate: str = "NE", positive_branch=True, - ): + ): """ See `calc_eps_to_amp` """ @@ -457,7 +520,7 @@ def calc_eps_to_amp( state_B: str = "02", which_gate: str = "NE", positive_branch=True, - ): + ): """ Calculates amplitude in Volt corresponding to an energy difference between two states in Hz. @@ -635,7 +698,7 @@ def calc_freq_to_amp( state: str = "01", which_gate: str = "NE", positive_branch=True, - ): + ): """ Calculates amplitude in Volt corresponding to the energy of a state in Hz. 
@@ -794,13 +857,31 @@ def calc_amp_to_freq(self, amp: float, state: str = "01", which_gate: str = "NE" return np.polyval(polycoeffs, amp) + def calc_parking_freq(self): + _gain = self.cfg_awg_channel_amplitude() + _rang = self.cfg_awg_channel_range() + _dac = self.park_amp() + _out_amp = _gain*_dac*_rang/2 + + _coefs = self.q_polycoeffs_freq_01_det() + return np.polyval(_coefs, _out_amp) + + def calc_gate_freq(self, direction:str): + _gain = self.cfg_awg_channel_amplitude() + _rang = self.cfg_awg_channel_range() + _dac = self.get(f'vcz_amp_dac_at_11_02_{direction}') + _fac = self.get(f'vcz_amp_sq_{direction}') + _out_amp = _gain*_dac*_fac*_rang/2 + + _coefs = self.q_polycoeffs_freq_01_det() + return np.polyval(_coefs, _out_amp) + ################################# # Waveform loading methods # ################################# - def load_waveform_onto_AWG_lookuptable( self, wave_id: str, regenerate_waveforms: bool = False - ): + ): """ Loads a specific waveform to the AWG """ @@ -848,7 +929,7 @@ def load_waveform_onto_AWG_lookuptable( def load_waveforms_onto_AWG_lookuptable( self, regenerate_waveforms: bool = True, stop_start: bool = True - ): + ): """ Loads all waveforms specified in the LutMap to an AWG for both this LutMap and the partner LutMap. 
@@ -881,8 +962,9 @@ def _append_zero_samples(self, waveform): Helper method to ensure waveforms have the desired length """ length_samples = roundup1024( - int(self.sampling_rate() * self.cfg_max_wf_length()) - ) + int(self.sampling_rate() * self.cfg_max_wf_length()), + self.cfg_max_wf_length() + ) extra_samples = length_samples - len(waveform) if extra_samples >= 0: y_sig = np.concatenate([waveform, np.zeros(extra_samples)]) @@ -919,7 +1001,8 @@ def distort_waveform(self, waveform, inverse=False): distorted_waveform = k.distort_waveform( waveform, length_samples=int( - roundup1024(self.cfg_max_wf_length() * self.sampling_rate()) + roundup1024(self.cfg_max_wf_length() * self.sampling_rate(), + self.cfg_max_wf_length()) ), inverse=inverse, ) @@ -935,7 +1018,6 @@ def distort_waveform(self, waveform, inverse=False): ################################# # Plotting methods # ################################# - def plot_cz_trajectory(self, axs=None, show=True, which_gate="NE"): """ Plots the cz trajectory in frequency space. @@ -1105,7 +1187,7 @@ def plot_level_diagram(self, ax=None, show=True, which_gate="NE"): def plot_cz_waveforms( self, qubits: list, which_gate_list: list, ax=None, show: bool = True - ): + ): """ Plots the cz waveforms from several flux lutamns, mainly for verification, time alignment and debugging @@ -1271,10 +1353,218 @@ def _add_cfg_parameters(self): ) +class LRU_Flux_LutMan(Base_Flux_LutMan): + def __init__(self, name, **kw): + super().__init__(name, **kw) + self._wave_dict_dist = dict() + self.sampling_rate(2.4e9) + + def _add_waveform_parameters(self): + # CODEWORD 1: Idling + self.add_parameter( + "idle_pulse_length", + unit="s", + label="Idling pulse length", + initial_value=40e-9, + vals=vals.Numbers(0, 100e-6), + parameter_class=ManualParameter, + ) + # Parameters for leakage reduction unit pulse. 
+ self.add_parameter('mw_lru_modulation', unit='Hz', + docstring=('Modulation frequency for LRU pulse.'), + vals=vals.Numbers(), + parameter_class=ManualParameter, initial_value=0.0e6) + self.add_parameter('mw_lru_amplitude', unit='frac', + docstring=('amplitude for LRU pulse.'), + vals=vals.Numbers(-1, 1), + parameter_class=ManualParameter, initial_value=.8) + self.add_parameter('mw_lru_duration', unit='s', + vals=vals.Numbers(), + parameter_class=ManualParameter, + initial_value=300e-9) + self.add_parameter('mw_lru_rise_duration', unit='s', + vals=vals.Numbers(), + parameter_class=ManualParameter, + initial_value=30e-9) + + def _add_cfg_parameters(self): + self.add_parameter( + "cfg_awg_channel", + initial_value=1, + vals=vals.Ints(1, 8), + parameter_class=ManualParameter, + ) + self.add_parameter( + "_awgs_fl_sequencer_program_expected_hash", # FIXME: un used? + docstring="crc32 hash of the awg8 sequencer program. " + "This parameter is used to dynamically determine " + "if the program needs to be uploaded. The initial_value is" + " None, indicating that the program needs to be uploaded." 
+ " After the first program is uploaded, the value is set.", + initial_value=None, + vals=vals.Ints(), + parameter_class=ManualParameter, + ) + self.add_parameter( + "cfg_max_wf_length", + parameter_class=ManualParameter, + initial_value=10e-6, + unit="s", + vals=vals.Numbers(0, 100e-6), + ) + self.add_parameter( + "cfg_awg_channel_range", + docstring="peak peak value, channel range of 5 corresponds to -2.5V to +2.5V", + get_cmd=self._get_awg_channel_range, + unit="V_pp", + ) + self.add_parameter( + "cfg_awg_channel_amplitude", + docstring="digital scale factor between 0 and 1", + get_cmd=self._get_awg_channel_amplitude, + set_cmd=self._set_awg_channel_amplitude, + unit="a.u.", + vals=vals.Numbers(0, 1), + ) + + def set_default_lutmap(self): + """Set the default lutmap for LRU drive pulses.""" + lm = { + 0: {"name": "i", "type": "idle"}, + 1: {"name": "lru", "type": "lru"}, + } + self.LutMap(lm) + + def generate_standard_waveforms(self): + + """ + Generate all the standard waveforms and populates self._wave_dict + """ + + self._wave_dict = {} + # N.B. the naming convention ._gen_{waveform_name} must be preserved + # as it is used in the load_waveform_onto_AWG_lookuptable method. 
+ self._wave_dict["i"] = self._gen_i() + self._wave_dict["lru"] = self._gen_lru() + + def _gen_i(self): + return np.zeros(int(self.idle_pulse_length() * self.sampling_rate())) + + def _gen_lru(self): + self.lru_func = wf.mod_lru_pulse + _wf = self.lru_func( + t_total = self.mw_lru_duration(), + t_rise = self.mw_lru_rise_duration(), + f_modulation = self.mw_lru_modulation(), + amplitude = self.mw_lru_amplitude(), + sampling_rate = self.sampling_rate())[0] + return _wf + + def _get_awg_channel_amplitude(self): + AWG = self.AWG.get_instr() + awg_ch = self.cfg_awg_channel() - 1 # -1 is to account for starting at 1 + awg_nr = awg_ch // 2 + ch_pair = awg_ch % 2 + + channel_amp = AWG.get("awgs_{}_outputs_{}_amplitude".format(awg_nr, ch_pair)) + return channel_amp + + def _set_awg_channel_amplitude(self, val): + AWG = self.AWG.get_instr() + awg_ch = self.cfg_awg_channel() - 1 # -1 is to account for starting at 1 + awg_nr = awg_ch // 2 + ch_pair = awg_ch % 2 + AWG.set("awgs_{}_outputs_{}_amplitude".format(awg_nr, ch_pair), val) + + def _get_awg_channel_range(self): + AWG = self.AWG.get_instr() + awg_ch = self.cfg_awg_channel() - 1 # -1 is to account for starting at 1 + # channel range of 5 corresponds to -2.5V to +2.5V + for i in range(5): + channel_range_pp = AWG.get("sigouts_{}_range".format(awg_ch)) + if channel_range_pp is not None: + break + time.sleep(0.5) + return channel_range_pp + + def _append_zero_samples(self, waveform): + """ + Helper method to ensure waveforms have the desired length + """ + length_samples = roundup1024( + int(self.sampling_rate() * self.cfg_max_wf_length()), + self.cfg_max_wf_length() + ) + extra_samples = length_samples - len(waveform) + if extra_samples >= 0: + y_sig = np.concatenate([waveform, np.zeros(extra_samples)]) + else: + y_sig = waveform[:extra_samples] + return y_sig + + def load_waveform_onto_AWG_lookuptable( + self, wave_id: str, regenerate_waveforms: bool = False): + """ + Loads a specific waveform to the AWG + """ + # Here we 
are ductyping to determine if the waveform name or the + # codeword was specified. + if type(wave_id) == str: + waveform_name = wave_id + codeword = get_wf_idx_from_name(wave_id, self.LutMap()) + else: + waveform_name = self.LutMap()[wave_id]["name"] + codeword = wave_id + + if regenerate_waveforms: + gen_wf_func = getattr(self, "_gen_{}".format(waveform_name)) + self._wave_dict[waveform_name] = gen_wf_func() + + waveform = self._wave_dict[waveform_name] + codeword_str = "wave_ch{}_cw{:03}".format(self.cfg_awg_channel(), codeword) + + # This is where the fixed length waveform is + # set to cfg_max_wf_length + waveform = self._append_zero_samples(waveform) + self._wave_dict_dist[waveform_name] = waveform + + self.AWG.get_instr().set(codeword_str, waveform) + + def load_waveforms_onto_AWG_lookuptable( + self, regenerate_waveforms: bool = True, stop_start: bool = True): + """ + Loads all waveforms specified in the LutMap to an AWG for both this + LutMap and the partner LutMap. + + Args: + regenerate_waveforms (bool): if True calls + generate_standard_waveforms before uploading. + stop_start (bool): if True stops and starts the AWG. 
+ + """ + + AWG = self.AWG.get_instr() + + if stop_start: + AWG.stop() + + for idx, waveform in self.LutMap().items(): + self.load_waveform_onto_AWG_lookuptable( + wave_id=idx, regenerate_waveforms=regenerate_waveforms + ) + + self.cfg_awg_channel_amplitude() + self.cfg_awg_channel_range() + + if stop_start: + AWG.start() + + ######################################################################### # Convenience functions below ######################################################################### -def roundup1024(n): - return int(np.ceil(n / 96) * 96) # FIXME: does not perform rounding implied by function name +def roundup1024(n, waveform_length): + n_samples = int(waveform_length*2.4e9) + return int(np.ceil(n / n_samples) * n_samples) diff --git a/pycqed/instrument_drivers/meta_instrument/LutMans/mw_lutman.py b/pycqed/instrument_drivers/meta_instrument/LutMans/mw_lutman.py index e70e5c6cb3..c310d764a0 100644 --- a/pycqed/instrument_drivers/meta_instrument/LutMans/mw_lutman.py +++ b/pycqed/instrument_drivers/meta_instrument/LutMans/mw_lutman.py @@ -1,9 +1,8 @@ from .base_lutman import Base_LutMan, get_redundant_codewords, get_wf_idx_from_name - import numpy as np from collections.abc import Iterable from collections import OrderedDict - +from importlib import reload from qcodes.instrument.parameter import ManualParameter from qcodes.utils import validators as vals @@ -26,15 +25,19 @@ 13 : {"name" : "rX45" , "theta" : 45 , "phi" : 0 , "type" : "ge"}, 14 : {"name" : "rXm45" , "theta" : -45 , "phi" : 0 , "type" : "ge"}, 15 : {"name" : "rX12_90" , "theta" : 90, "phi" : 0 , "type" : "ef"}, + 16 : {"name" : "rX23_90" , "theta" : 90, "phi" : 0 , "type" : "fh"}, + 27 : {'name': 'rXm180', 'phi': 0, 'theta': -180, 'type': 'ge'}, 30 : {"name" : "rPhi180" , "theta" : 180 , "phi" : 0 , "type" : "ge"}, - 52 : {"name" : "phaseCorrPark1" , "type" : "phase"}, - 53 : {"name" : "phaseCorrPark2" , "type" : "phase"}, - 54 : {"name" : "phaseCorrPark3" , "type" : "phase"}, - 55 : 
{"name" : "phaseCorrPark4" , "type" : "phase"}, - 56 : {"name" : "phaseCorrPark5" , "type" : "phase"}, - 57 : {"name" : "phaseCorrPark6" , "type" : "phase"}, - 58 : {"name" : "phaseCorrPark7" , "type" : "phase"}, - 59 : {"name" : "phaseCorrPark8" , "type" : "phase"}, + 30 : {"name" : "rX23" , "theta" : 180 , "phi" : 0 , "type" : "fh"}, + 51 : {"name" : "phaseCorrLRU" , "type" : "phase"}, + 52 : {"name" : "phaseCorrStep1" , "type" : "phase"}, + 53 : {"name" : "phaseCorrStep2" , "type" : "phase"}, + 54 : {"name" : "phaseCorrStep3" , "type" : "phase"}, + 55 : {"name" : "phaseCorrStep4" , "type" : "phase"}, + 56 : {"name" : "phaseCorrStep5" , "type" : "phase"}, + 57 : {"name" : "phaseCorrStep6" , "type" : "phase"}, + 58 : {"name" : "phaseCorrStep7" , "type" : "phase"}, + 59 : {"name" : "phaseCorrStep8" , "type" : "phase"}, 60 : {"name" : "phaseCorrNW" , "type" : "phase"}, 61 : {"name" : "phaseCorrNE" , "type" : "phase"}, 62 : {"name" : "phaseCorrSW" , "type" : "phase"}, @@ -239,6 +242,7 @@ def _add_waveform_parameters(self): # defined here so that the VSM based LutMan can overwrite this self.wf_func = wf.mod_gauss self.spec_func = wf.block_pulse + self.lru_func = wf.mod_lru_pulse self._add_channel_params() self._add_mixer_corr_pars() @@ -305,16 +309,16 @@ def _add_waveform_parameters(self): # parameters related to timings self.add_parameter( 'pulse_delay', - unit='s', vals=vals.Numbers(0, 1e-6), + unit='s', parameter_class=ManualParameter, initial_value=0 ) # square pulse duration for larger pulses self.add_parameter( 'sq_pulse_duration', - unit='s', vals=vals.Numbers(0, 1e-6), + unit='s', parameter_class=ManualParameter, initial_value=40e-9 ) @@ -345,6 +349,23 @@ def _add_waveform_parameters(self): parameter_class=ManualParameter, initial_value=.2 ) + self.add_parameter( + 'mw_fh_modulation', + vals=vals.Numbers(), + unit='Hz', + docstring=('Modulation frequency for driving pulses to the third excited-state.'), + parameter_class=ManualParameter, + initial_value=50.0e6 
+ ) + self.add_parameter( + 'mw_fh_amp180', + unit='frac', + docstring=('Pulse amplitude for pulsing the fh/23 transition'), + vals=vals.Numbers(-1, 1), + parameter_class=ManualParameter, + initial_value=.2 + ) + def generate_standard_waveforms(self, apply_predistortion_matrix: bool=True): self._wave_dict = OrderedDict() @@ -385,6 +406,18 @@ def generate_standard_waveforms(self, apply_predistortion_matrix: bool=True): motzoi=0, delay=self.pulse_delay()) + elif waveform['type'] == 'fh': + amp = theta_to_amp(theta=waveform['theta'], + amp180=self.mw_fh_amp180()) + self._wave_dict[idx] = self.wf_func( + amp=amp, + phase=waveform['phi'], + sigma_length=self.mw_gauss_width(), + f_modulation=self.mw_fh_modulation(), + sampling_rate=self.sampling_rate(), + motzoi=0, + delay=self.pulse_delay()) + elif waveform['type'] == 'raw-drag': self._wave_dict[idx] = self.wf_func( **waveform["drag_pars"]) @@ -427,6 +460,15 @@ def generate_standard_waveforms(self, apply_predistortion_matrix: bool=True): ) else: raise KeyError('Expected parameter "sq_amp" to exist') + + elif waveform['type'] == 'lru': + self._wave_dict[idx] = self.lru_func( + t_total = self.mw_lru_duration(), + t_rise = self.mw_lru_rise_duration(), + f_modulation = f_modulation, + amplitude = self.mw_lru_amplitude(), + sampling_rate = self.sampling_rate()) + elif waveform['type'] == 'phase': pass else: @@ -585,14 +627,14 @@ def load_ef_rabi_pulses_to_AWG_lookuptable(self, amps: list=None, if mod_freqs is None: mod_freqs = [self.mw_ef_modulation()]*len(amps) elif len(mod_freqs) == 1: - mod_freqs = [mod_freqs]*len(amps) + mod_freqs = mod_freqs*len(amps) # 2. 
Generate a LutMap for the ef-pulses # FIXME: hardcoded indices must match OpenQL definitions lm = self.LutMap() for i, (amp, mod_freq) in enumerate(zip(amps, mod_freqs)): - lm[i+9] = {"name": "", "type": "raw-drag", - "drag_pars": { + lm[i+30] = {"name": "", "type": "raw-drag", + "drag_pars": { "amp": amp, "f_modulation": mod_freq, "sigma_length": self.mw_gauss_width(), "sampling_rate": self.sampling_rate(), @@ -779,44 +821,38 @@ def _add_phase_correction_parameters(self): "Will be applied as increment to sine generator phases via command table." ) - # corrections for phases that the qubit can acquire during parking as spectator of a CZ gate. - # this can happen in general for each of its neighbouring qubits (below: 'direction'), - # while it is doing a gate in each possible direction (below: 'gate') - # for direction in ['NW','NE','SW','SE']: - # for gate in ['NW','NE','SW','SE']: - # self.add_parameter( - # name=f'vcz_virtual_q_ph_corr_spec_{direction}_gate_{gate}', - # parameter_class=ManualParameter, - # unit='deg', - # vals=vals.Numbers(0, 360), - # initial_value=0.0, - # docstring=f"Virtual phase correction for parking as spectator of a qubit in direction {direction}, " - # f"that is doing a gate in direction {gate}." - # "Will be applied as increment to sine generator phases via command table." - # ) - # corrections for phases that the qubit can acquire during parking as part of a flux-dance step # there are 8 flux-dance steps for the S17 scheme. 
# NOTE: this correction must not be the same as the above one for the case of a spectator # for a single CZ, because in a flux-dance the qubit can be parked because of multiple adjacent CZ gates for step in np.arange(1,9): self.add_parameter( - name=f'vcz_virtual_q_ph_corr_park_step_{step}', + name=f'vcz_virtual_q_ph_corr_step_{step}', parameter_class=ManualParameter, unit='deg', - vals=vals.Numbers(-360, 360), + vals=vals.Numbers(0, 360), initial_value=0.0, docstring=f"Virtual phase correction for parking in flux-dance step {step}." "Will be applied as increment to sine generator phases via command table." ) + # LRU phase correction + self.add_parameter( + name=f'LRU_virtual_q_ph_corr', + parameter_class=ManualParameter, + unit='deg', + vals=vals.Numbers(0, 360), + initial_value=0.0, + docstring=f"Virtual phase correction for LRU gate." + "Will be applied as increment to sine generator phases via command table." + ) + def _reset_phase_correction_parameters(self): for gate in ['NW','NE','SW','SE']: self.parameters[f'vcz_virtual_q_ph_corr_{gate}'](0) for step in np.arange(1,9): self.parameters[f'vcz_virtual_q_ph_corr_park_step_{step}'](0) - def _set_channel_range(self, val): awg_nr = (self.channel_I()-1)//2 assert awg_nr == (self.channel_Q()-1)//2 @@ -833,7 +869,6 @@ def _set_channel_range(self, val): AWG.set('sigouts_{}_direct'.format(self.channel_Q()-1), 0) AWG.set('sigouts_{}_range'.format(self.channel_Q()-1), val) - def _get_channel_range(self): awg_nr = (self.channel_I()-1)//2 assert awg_nr == (self.channel_Q()-1)//2 @@ -865,14 +900,14 @@ def _set_channel_amp(self, val): g1 = self.mixer_alpha()*1/np.cos(np.radians(self.mixer_phi())) if np.abs(val*g0) > 1.0 or np.abs(val*g1) > 1.0: - raise Exception('Resulting amplitude from mixer parameters '+\ - 'exceed the maximum channel amplitude') - # print('Resulting amplitude from mixer parameters '+\ - # 'exceed the maximum channel amplitude') - # if np.abs(val*g0): - # g0 = 1/val - # if np.abs(val*g1): - # g1 = 1/val + 
# raise Exception('Resulting amplitude from mixer parameters '+\ + # 'exceed the maximum channel amplitude') + print('Resulting amplitude from mixer parameters '+\ + 'exceed the maximum channel amplitude') + if np.abs(val*g0): + g0 = 1/val + if np.abs(val*g1): + g1 = 1/val AWG.set('awgs_{}_outputs_0_gains_0'.format(awg_nr), val) AWG.set('awgs_{}_outputs_1_gains_0'.format(awg_nr), 0) @@ -993,10 +1028,10 @@ def load_waveforms_onto_AWG_lookuptable( ########################################################################## def apply_mixer_predistortion_corrections(self, wave_dict): - M = wf.mixer_predistortion_matrix(self.mixer_alpha(), self.mixer_phi()) - for key, val in wave_dict.items(): - wave_dict[key] = np.dot(M, val) - return wave_dict + M = wf.mixer_predistortion_matrix(self.mixer_alpha(), self.mixer_phi()) + for key, val in wave_dict.items(): + wave_dict[key] = np.dot(M, val) + return wave_dict def generate_standard_waveforms( self, apply_predistortion_matrix: bool=True): @@ -1046,6 +1081,18 @@ def generate_standard_waveforms( motzoi=0, delay=self.pulse_delay()) + elif waveform['type'] == 'fh': + amp = theta_to_amp(theta=waveform['theta'], + amp180=self.mw_fh_amp180()) + self._wave_dict[idx] = self.wf_func( + amp=amp, + phase=waveform['phi'], + sigma_length=self.mw_gauss_width(), + f_modulation=self.mw_fh_modulation(), + sampling_rate=self.sampling_rate(), + motzoi=0, + delay=self.pulse_delay()) + elif waveform['type'] == 'raw-drag': self._wave_dict[idx] = self.wf_func( **waveform["drag_pars"]) @@ -1091,6 +1138,23 @@ def generate_standard_waveforms( sampling_rate=self.sampling_rate(), length=self.mw_gauss_width()*4, ) + + elif waveform['type'] == 'lru': + self._wave_dict[idx] = self.lru_func( + t_total = self.mw_lru_duration(), + t_rise = self.mw_lru_rise_duration(), + f_modulation = f_modulation, + amplitude = self.mw_lru_amplitude(), + sampling_rate = self.sampling_rate()) + + elif waveform['type'] == 'lru_idle': + # fill codewords that are used to idle when 
using LRU + # with a zero waveform + self._wave_dict[idx] = wf.block_pulse( + amp=0, + sampling_rate=self.sampling_rate(), + length=self.mw_lru_duration(), + ) else: raise ValueError @@ -1114,14 +1178,21 @@ def upload_single_qubit_phase_corrections(self): } # manual waveform index 1-to-1 mapping - for ind in np.arange(0, 60, 1): + for ind in np.arange(0, 51, 1): commandtable_dict['table'] += [{"index": int(ind), "waveform": {"index": int(ind)} }] # add phase corrections to the end of the codeword space - # the first 8 positions are for parking related phase corrections, + # the first position is for the LRU phase correction + # the 8 positions after that, are for parking related phase corrections, # the last 4 are for phase corrections due to gate in corresponding direction + # phase = self.parameters['LRU_virtual_q_ph_corr']() + phase = 0 + commandtable_dict['table'] += [{"index": int(51), + "phase0": {"value": float(phase), "increment": True}, + "phase1": {"value": float(phase), "increment": True} + }] phase_corr_inds = np.arange(52,64,1) for step, cw in enumerate(phase_corr_inds[:8]): phase = self.parameters[f"vcz_virtual_q_ph_corr_step_{step+1}"]() @@ -1705,7 +1776,6 @@ def set_VQE_lutmap(self): 'wave_ch{}_cw{:03}'.format(self.channel_Q(), cw_idx)) self.LutMap(LutMap) - # Not the cleanest inheritance but whatever - MAR Nov 2017 class QWG_VSM_MW_LutMan(AWG8_VSM_MW_LutMan): @@ -1746,3 +1816,310 @@ def _add_channel_params(self): # 'wave_ch3_cw{:03}'.format(cw_idx), # 'wave_ch4_cw{:03}'.format(cw_idx)) # self.LutMap(LutMap) + + +class LRU_MW_LutMan(Base_MW_LutMan): + ''' + Microwave HDAWG lutman for LRU pulses. 
+ ''' + def __init__(self, name, **kw): + self._num_channels = 8 + super().__init__(name, **kw) + self.sampling_rate(2.4e9) + + ########################################################################## + # Base_LutMan overrides + ########################################################################## + + def _add_channel_params(self): + super()._add_channel_params() + self.add_parameter( + 'channel_amp', unit='a.u.', vals=vals.Numbers(0, 1), + set_cmd=self._set_channel_amp, get_cmd=self._get_channel_amp, + docstring=('using the channel amp as additional' + 'parameter to allow rabi-type experiments without' + 'wave reloading. Should not be using VSM')) + self.add_parameter( + 'channel_range', unit='V', vals=vals.Enum(0.2, 0.4, 0.6, 0.8, 1, 2, 3, 4, 5), + set_cmd=self._set_channel_range, get_cmd=self._get_channel_range, + docstring=('defines the channel range for the AWG sequencer output')) + + # Setting variable to track channel amplitude since it cannot be directly extracted from + # HDAWG while using real-time modulation (because of mixer amplitude imbalance corrections) + self.channel_amp_value = 0 + + def _add_waveform_parameters(self): + # defined here so that the VSM based LutMan can overwrite this + self.wf_func = wf.mod_gauss + self.spec_func = wf.block_pulse + self.lru_func = wf.mod_lru_pulse + + self._add_channel_params() + self._add_mixer_corr_pars() + + self.add_parameter('cfg_sideband_mode', + vals=vals.Enum('real-time', 'static'), + initial_value='static', + parameter_class=ManualParameter) + # Parameters for leakage reduction unit pulse. 
+ self.add_parameter('mw_lru_modulation', unit='Hz', + docstring=('Modulation frequency for LRU pulse.'), + vals=vals.Numbers(), + parameter_class=ManualParameter, initial_value=0.0e6) + self.add_parameter('mw_lru_amplitude', unit='frac', + docstring=('amplitude for LRU pulse.'), + vals=vals.Numbers(-1, 1), + parameter_class=ManualParameter, initial_value=.8) + self.add_parameter('mw_lru_duration', unit='s', + vals=vals.Numbers(), + parameter_class=ManualParameter, + initial_value=300e-9) + self.add_parameter('mw_lru_rise_duration', unit='s', + vals=vals.Numbers(), + parameter_class=ManualParameter, + initial_value=30e-9) + + def _set_channel_range(self, val): + awg_nr = (self.channel_I()-1)//2 + assert awg_nr == (self.channel_Q()-1)//2 + assert self.channel_I() < self.channel_Q() + AWG = self.AWG.get_instr() + if val == 0.8: + AWG.set('sigouts_{}_range'.format(self.channel_I()-1), .8) + AWG.set('sigouts_{}_direct'.format(self.channel_I()-1), 1) + AWG.set('sigouts_{}_range'.format(self.channel_Q()-1), .8) + AWG.set('sigouts_{}_direct'.format(self.channel_Q()-1), 1) + else: + AWG.set('sigouts_{}_direct'.format(self.channel_I()-1), 0) + AWG.set('sigouts_{}_range'.format(self.channel_I()-1), val) + AWG.set('sigouts_{}_direct'.format(self.channel_Q()-1), 0) + AWG.set('sigouts_{}_range'.format(self.channel_Q()-1), val) + + def _get_channel_range(self): + awg_nr = (self.channel_I()-1)//2 + assert awg_nr == (self.channel_Q()-1)//2 + assert self.channel_I() < self.channel_Q() + + AWG = self.AWG.get_instr() + val = AWG.get('sigouts_{}_range'.format(self.channel_I()-1)) + assert val == AWG.get('sigouts_{}_range'.format(self.channel_Q()-1)) + return val + + def _set_channel_amp(self, val): + AWG = self.AWG.get_instr() + awg_nr = (self.channel_I()-1)//2 + # Enforce assumption that channel I preceeds channel Q and share AWG + assert awg_nr == (self.channel_Q()-1)//2 + assert self.channel_I() < self.channel_Q() + self.channel_amp_value = val + + if self.cfg_sideband_mode() == 
'static': + AWG.set('awgs_{}_outputs_{}_gains_0'.format(awg_nr, 0), val) + AWG.set('awgs_{}_outputs_{}_gains_0'.format(awg_nr, 1), 0) + AWG.set('awgs_{}_outputs_{}_gains_1'.format(awg_nr, 0), 0) + AWG.set('awgs_{}_outputs_{}_gains_1'.format(awg_nr, 1), val) + + # In case of sideband modulation mode 'real-time', amplitudes have to be set + # according to modulation matrix + elif self.cfg_sideband_mode() == 'real-time': + g0 = np.tan(np.radians(self.mixer_phi())) + g1 = self.mixer_alpha()*1/np.cos(np.radians(self.mixer_phi())) + + if np.abs(val*g0) > 1.0 or np.abs(val*g1) > 1.0: + raise Exception('Resulting amplitude from mixer parameters '+\ + 'exceed the maximum channel amplitude') + # print('Resulting amplitude from mixer parameters '+\ + # 'exceed the maximum channel amplitude') + # if np.abs(val*g0): + # g0 = 1/val + # if np.abs(val*g1): + # g1 = 1/val + + AWG.set('awgs_{}_outputs_0_gains_0'.format(awg_nr), val) + AWG.set('awgs_{}_outputs_1_gains_0'.format(awg_nr), 0) + AWG.set('awgs_{}_outputs_0_gains_1'.format(awg_nr), val*g0) + AWG.set('awgs_{}_outputs_1_gains_1'.format(awg_nr), val*g1) + else: + raise KeyError('Unexpected value for parameter sideband mode.') + + def _get_channel_amp(self): + AWG = self.AWG.get_instr() + awg_nr = (self.channel_I()-1)//2 + # Enforce assumption that channel I precedes channel Q and share AWG + assert awg_nr == (self.channel_Q()-1)//2 + assert self.channel_I() < self.channel_Q() + + vals = [] + if self.cfg_sideband_mode() == 'static': + vals.append(AWG.get('awgs_{}_outputs_{}_gains_0'.format(awg_nr, 0))) + vals.append(AWG.get('awgs_{}_outputs_{}_gains_1'.format(awg_nr, 0))) + vals.append(AWG.get('awgs_{}_outputs_{}_gains_0'.format(awg_nr, 1))) + vals.append(AWG.get('awgs_{}_outputs_{}_gains_1'.format(awg_nr, 1))) + assert vals[0]==vals[4] + assert vals[1]==vals[2]==0 + + # In case of sideband modulation mode 'real-time', amplitudes have to be set + # according to modulation matrix + elif self.cfg_sideband_mode() == 'real-time': 
+ vals.append(self.channel_amp_value) + + return vals[0] + + def load_waveform_onto_AWG_lookuptable( + self, wave_id: str, regenerate_waveforms: bool=False): + """ + Load a waveform into the AWG. + + Args: + wave_id: can be either the "name" of a waveform or + the integer key in self._wave_dict. + regenerate_waveforms (bool) : if True regenerates all waveforms + """ + if regenerate_waveforms: + self.generate_standard_waveforms() + + if wave_id not in self.LutMap().keys(): + wave_id = get_wf_idx_from_name(wave_id, self.LutMap()) + + wf_I, wf_Q = self._wave_dict[wave_id] + + wf_name_I = 'wave_ch{}_cw{:03}'.format(self.channel_I(), wave_id) + wf_name_Q = 'wave_ch{}_cw{:03}'.format(self.channel_Q(), wave_id) + + self.AWG.get_instr().set(wf_name_I, wf_I) + self.AWG.get_instr().set(wf_name_Q, wf_Q) + + def load_waveforms_onto_AWG_lookuptable( + self, regenerate_waveforms: bool=True, stop_start: bool = True, + force_load_sequencer_program: bool=False): + """ + Loads all waveforms specified in the LutMap to an AWG. + + Args: + regenerate_waveforms (bool): if True calls + generate_standard_waveforms before uploading. + stop_start (bool): if True stops and starts the AWG. + force_load_sequencer_program (bool): if True forces a new compilation + and upload of the program on the sequencer. FIXME: parameter pack incompatible with base class + """ + # Uploading the codeword program (again) is needed to link the new + # waveforms in case the user has changed the codeword mode. 
+ if force_load_sequencer_program: + # This ensures only the channels that are relevant get reconfigured + if 'channel_GI' in self.parameters: + awgs = [self.channel_GI()//2, self.channel_DI()//2] + else: + awgs = [self.channel_I()//2] + # Enforce assumption that channel I precedes channel Q + assert self.channel_I() < self.channel_Q() + assert (self.channel_I())//2 < (self.channel_Q())//2 + + self.AWG.get_instr().upload_codeword_program(awgs=awgs) + + # This ensures that settings other than the sequencer program are updated + # for different sideband modulation modes + if self.cfg_sideband_mode() == 'static': + self.AWG.get_instr().cfg_sideband_mode('static') + # Turn off modulation modes + self.AWG.get_instr().set('awgs_{}_outputs_0_modulation_mode'.format((self.channel_I()-1)//2), 0) + self.AWG.get_instr().set('awgs_{}_outputs_1_modulation_mode'.format((self.channel_Q()-1)//2), 0) + + elif self.cfg_sideband_mode() == 'real-time': + if (self.channel_I()-1)//2 != (self.channel_Q()-1)//2: + raise KeyError('In real-time sideband mode, channel I/Q should share same awg nr.') + self.AWG.get_instr().cfg_sideband_mode('real-time') + + # Set same oscillator for I/Q pair and same harmonic + self.AWG.get_instr().set('sines_{}_oscselect'.format(self.channel_I()-1), (self.channel_I()-1)//2) + self.AWG.get_instr().set('sines_{}_oscselect'.format(self.channel_Q()-1), (self.channel_I()-1)//2) + self.AWG.get_instr().set('sines_{}_harmonic'.format(self.channel_I()-1), 1) + self.AWG.get_instr().set('sines_{}_harmonic'.format(self.channel_Q()-1), 1) + # Create respective cossine/sin signals for modulation through phase-shift + self.AWG.get_instr().set('sines_{}_phaseshift'.format(self.channel_I()-1), 90) + self.AWG.get_instr().set('sines_{}_phaseshift'.format(self.channel_Q()-1), 0) + # Create correct modulation modeI + self.AWG.get_instr().set('awgs_{}_outputs_0_modulation_mode'.format((self.channel_I()-1)//2), 6) + 
self.AWG.get_instr().set('awgs_{}_outputs_1_modulation_mode'.format((self.channel_Q()-1)//2), 6) + else: + raise ValueError('Unexpected value for parameter cfg_sideband_mode.') + + super().load_waveforms_onto_AWG_lookuptable( + regenerate_waveforms=regenerate_waveforms, + stop_start=stop_start) + + def apply_mixer_predistortion_corrections(self, wave_dict): + M = wf.mixer_predistortion_matrix(self.mixer_alpha(), self.mixer_phi()) + for key, val in wave_dict.items(): + wave_dict[key] = np.dot(M, val) + return wave_dict + + def generate_standard_waveforms( + self, apply_predistortion_matrix: bool=True): + # FIXME: looks very similar to overridden function in Base_MW_LutMan + self._wave_dict = OrderedDict() + + if self.cfg_sideband_mode() == 'static': + f_modulation = self.mw_lru_modulation() + elif self.cfg_sideband_mode() == 'real-time': + f_modulation = 0 + if ((self.channel_I()-1)//2 != (self.channel_Q()-1)//2): + raise KeyError('In real-time sideband mode, channel I/Q should share same awg group.') + + self.AWG.get_instr().set('oscs_{}_freq'.format((self.channel_I()-1)//2), + self.mw_lru_modulation()) + else: + raise KeyError('Unexpected argument for cfg_sideband_mode') + + # lutmap is expected to obey lutmap mw schema + for idx, waveform in self.LutMap().items(): + if waveform['type'] == 'lru': + self._wave_dict[idx] = self.lru_func( + t_total = self.mw_lru_duration(), + t_rise = self.mw_lru_rise_duration(), + f_modulation = f_modulation, + amplitude = self.mw_lru_amplitude(), + sampling_rate = self.sampling_rate()) + + elif waveform['type'] == 'lru_idle': + # fill codewords that are used to idle when using LRU + # with a zero waveform + self._wave_dict[idx] = wf.block_pulse( + amp=0, + sampling_rate=self.sampling_rate(), + length=self.mw_lru_duration(), + ) + else: + raise ValueError + + # Add predistortions + test + if (self.mixer_apply_predistortion_matrix() and apply_predistortion_matrix and + self.cfg_sideband_mode() == 'static'): + self._wave_dict = 
self.apply_mixer_predistortion_corrections( + self._wave_dict) + return self._wave_dict + + def upload_single_qubit_phase_corrections(self): + commandtable_dict = { + "$schema": "http://docs.zhinst.com/hdawg/commandtable/v2/schema", + "header": {"version": "0.2"}, + "table": [] + } + + # manual waveform index 1-to-1 mapping + for ind in np.arange(0, 64, 1): + commandtable_dict['table'] += [{"index": int(ind), + "waveform": {"index": int(ind)} + }] + # NOTE: Whenever the command table is used, the phase offset between I and Q channels on + # the HDAWG for real-time modulation has to be initialized from the table itself. + # Index 1023 will be reserved for this (it should no be used for codeword triggering) + commandtable_dict['table'] += [{"index": 1023, + "phase0": {"value": 90.0, "increment": False}, + "phase1": {"value": 0.0, "increment": False} + }] + + # get internal awg sequencer number (indexed 0,1,2,3) + awg_nr = (self.channel_I() - 1) // 2 + commandtable_returned, status = self.AWG.get_instr().upload_commandtable(commandtable_dict, awg_nr) + + return commandtable_returned, status \ No newline at end of file diff --git a/pycqed/instrument_drivers/meta_instrument/LutMans/ro_lutman.py b/pycqed/instrument_drivers/meta_instrument/LutMans/ro_lutman.py index 8a8991d92c..bfa84f3330 100644 --- a/pycqed/instrument_drivers/meta_instrument/LutMans/ro_lutman.py +++ b/pycqed/instrument_drivers/meta_instrument/LutMans/ro_lutman.py @@ -50,6 +50,9 @@ def __init__( feedline_map='S7', **kw ): + if num_res > 10: # FIXME: this is UHFQA limit + raise ValueError('At most 10 resonators can be read out.') + self._num_res = num_res self._feedline_number = feedline_number self._resonator_codeword_bit_mapping: List[int] = kw.pop('force_bit_map', None) diff --git a/pycqed/instrument_drivers/meta_instrument/MW_crosstalk b/pycqed/instrument_drivers/meta_instrument/MW_crosstalk new file mode 100644 index 0000000000..e3065c2b41 --- /dev/null +++ 
b/pycqed/instrument_drivers/meta_instrument/MW_crosstalk @@ -0,0 +1,44 @@ +######################### +## MW crosstalk matrix +######################### +qubits_right = [D1, X1] +qubits_middle = [D3, D7, D2, X3, Z1, X2, Z3, D5, D4] +qubits_left = [D9, D8, X4, Z4, Z2, D6] +qubits = qubits_right + qubits_middle + qubits_left +file_cfg = gc.generate_config( + in_filename=input_file, + out_filename=config_fn, + mw_pulse_duration=1000, + ro_duration=2200, + flux_pulse_duration=40, + init_duration=200000) +mw_crosstalk = np.zeros((len(qubits),len(qubits)))+80 +for i,qb in enumerate(qubits): + for j,cross_qb in enumerate(qubits): + qb.mw_gauss_width(250e-9) + cross_qb.mw_gauss_width(250e-9) + mw_c_qb = cross_qb.instr_LutMan_MW.get_instr() + mw_c_qb.cfg_sideband_mode('static') + + if i == j: + mw_crosstalk[i][j] = None + else: + mw_crosstalk[i][j] = qb.measure_mw_crosstalk(cross_driving_qb=cross_qb.name) + +file_cfg = gc.generate_config( + in_filename=input_file, + out_filename=config_fn, + mw_pulse_duration=20, + ro_duration=2200, + flux_pulse_duration=40, + init_duration=200000) +for i,qb in enumerate(qubits): + qb.mw_gauss_width(5e-9) + qb.mw_gauss_width(5e-9) + qb = qb.instr_LutMan_MW.get_instr() + qb.cfg_sideband_mode('real-time') +MW_LO_1.frequency(6.200e9) +MW_LO_2.frequency(6.140e9) +MW_LO_3.frequency(5.090e9) +MW_LO_4.frequency(5.070e9) +MW_LO_5.frequency(6.950e9) \ No newline at end of file diff --git a/pycqed/instrument_drivers/meta_instrument/Surface17_dependency_graph.py b/pycqed/instrument_drivers/meta_instrument/Surface17_dependency_graph.py new file mode 100644 index 0000000000..2231a9bd26 --- /dev/null +++ b/pycqed/instrument_drivers/meta_instrument/Surface17_dependency_graph.py @@ -0,0 +1,4443 @@ +import numpy as np +import matplotlib.pyplot as plt +from importlib import reload +import autodepgraph +reload(autodepgraph) +from autodepgraph import AutoDepGraph_DAG +from pycqed.measurement import hdf5_data as h5d +from pycqed.analysis_v2 import 
from pycqed.analysis_v2 import measurement_analysis as ma2
from pycqed.utilities.general import get_gate_directions, get_nearest_neighbors, \
    get_parking_qubits
from pycqed.measurement import sweep_functions as swf
import pycqed.instrument_drivers.library.DIO as DIO
from pycqed.utilities.general import check_keyboard_interrupt, print_exception
from pycqed.instrument_drivers.meta_instrument.device_object_CCL import DeviceCCL as Device
from pycqed.instrument_drivers.meta_instrument.qubit_objects.CCL_Transmon import CCLight_Transmon as Transmon
from pycqed.instrument_drivers.meta_instrument.LutMans.flux_lutman_vcz import HDAWG_Flux_LutMan as FluxLutMan
from pycqed.qce_utils.control_interfaces.connectivity_surface_code import Repetition9Layer, QubitIDObj
from pycqed.analysis.tools import cryoscope_tools as ct
from pycqed.measurement import detector_functions

###############################################################################
# Single- and Two- qubit gate calibration graph
###############################################################################
import os
import logging
import pycqed as pq
from pycqed.measurement.openql_experiments import generate_CC_cfg as gc

input_file = os.path.join(pq.__path__[0], 'measurement',
                          'openql_experiments', 'config_cc_s17_direct_iq.json.in')
config_fn = os.path.join(pq.__path__[0], 'measurement',
                         'openql_experiments', 'output_cc_s17', 'config_cc_s17_direct_iq.json')
logging.basicConfig(level=logging.INFO)


class Full_calibration(AutoDepGraph_DAG):
    """Dependency graph for the full device calibration.

    Chains three stages: single-qubit gates, two-qubit (CZ) gates and
    parity checks, each ending in a snapshot-saving node.
    """

    def __init__(self,
                 name: str,
                 station,
                 **kwargs):
        super().__init__(name, **kwargs)
        self.station = station
        self.create_dep_graph()

    def create_dep_graph(self):
        """Build every calibration node and its dependency edges."""
        print(f'Creating dependency graph for full gate calibration')
        module_name = 'pycqed.instrument_drivers.meta_instrument.Surface17_dependency_graph'

        ######################################################################
        # Single qubit Graph
        ######################################################################
        Qubits = [
            'D1', 'D2', 'D3',
            'D4', 'D5', 'D6',
            'D7', 'D8', 'D9',
            # 'X1', 'X3', 'X4',
            'Z1', 'Z2', 'Z3', 'Z4',
        ]
        for qubit in Qubits:
            self.add_node(f'{qubit} Prepare for gate calibration',
                          calibrate_function=module_name + '.prepare_for_single_qubit_gate_calibration',
                          calibrate_function_args={
                              'qubit': qubit,
                              'station': self.station,
                          })
            # Frequency calibration is dispatched on the qubit object itself.
            self.add_node(f'{qubit} Frequency',
                          calibrate_function=qubit + '.calibrate_frequency_ramsey',
                          calibrate_function_args={
                              'steps': [3, 10, 30],
                              'disable_metadata': True})
            # All remaining per-qubit nodes share the same wrapper signature.
            for label, wrapper in (
                    ('Flipping', 'Flipping_wrapper'),
                    ('Motzoi', 'Motzoi_wrapper'),
                    ('AllXY', 'AllXY_wrapper'),
                    ('Readout', 'SSRO_wrapper'),
                    ('T1', 'T1_wrapper'),
                    ('T2', 'T2_wrapper'),
                    ('Randomized Benchmarking', 'Randomized_benchmarking_wrapper')):
                self.add_node(f'{qubit} {label}',
                              calibrate_function=module_name + '.' + wrapper,
                              calibrate_function_args={
                                  'qubit': qubit,
                                  'station': self.station,
                              })
            # self.add_node(f'{qubit} drive mixer calibration',
            #               calibrate_function=module_name+'.drive_mixer_wrapper',
            #               calibrate_function_args={
            #                   'qubit': qubit,
            #                   'station': self.station,
            #               })

            # Node dependencies
            self.add_edge(f'{qubit} Frequency',
                          f'{qubit} Prepare for gate calibration')
            self.add_edge(f'{qubit} Flipping', f'{qubit} Frequency')
            self.add_edge(f'{qubit} Motzoi', f'{qubit} Frequency')
            self.add_edge(f'{qubit} AllXY', f'{qubit} Flipping')
            self.add_edge(f'{qubit} AllXY', f'{qubit} Motzoi')
            for dep in ('AllXY', 'Readout', 'T1', 'T2'):
                self.add_edge(f'{qubit} Randomized Benchmarking',
                              f'{qubit} {dep}')

        self.add_node('Save snapshot single-qubit',
                      calibrate_function=module_name + '.save_snapshot_metadata',
                      calibrate_function_args={
                          'station': self.station,
                      })
        for qubit in Qubits:
            self.add_edge('Save snapshot single-qubit',
                          f'{qubit} Randomized Benchmarking')

        ######################################################################
        # Two qubit Graph
        ######################################################################
        Qubit_pairs = [
            ['Z3', 'D7'],
            ['D5', 'Z1'],
            ['Z4', 'D9'],
            ['Z1', 'D2'],
            ['D4', 'Z3'],
            ['D6', 'Z4'],
            ['Z4', 'D8'],
            ['D4', 'Z1'],
            ['D6', 'Z2'],
            ['Z2', 'D3'],
            ['Z1', 'D1'],
            ['D5', 'Z4'],
            ['X1', 'D2'],
            ['D6', 'X2'],
            ['X3', 'D8'],
            ['X1', 'D1'],
            ['D5', 'X2'],
            ['X3', 'D7'],
            ['X4', 'D9'],
            ['D5', 'X3'],
            ['X2', 'D3'],
            ['X4', 'D8'],
            ['D4', 'X3'],
            ['X2', 'D2'],
        ]
        # Single-qubit (flux-arc) nodes
        Qubits = np.unique(np.array(Qubit_pairs).flatten())
        for q in Qubits:
            self.add_node(f'{q} Flux arc',
                          calibrate_function=module_name + '.Flux_arc_wrapper',
                          calibrate_function_args={
                              'Qubit': q,
                              'station': self.station,
                          })
        # Two-qubit nodes
        QL_detunings = {
            # After detuning search (Yuejie&Sean)
            ('D5', 'X2'): 60e6,
            ('D5', 'X3'): 110e6,
            ('Z1', 'D1'): 380e6,
            ('Z1', 'D2'): 310e6,
            ('X1', 'D1'): 0e6,
            ('X3', 'D8'): 60e6,
            ('Z4', 'D8'): 100e6,
            ('X1', 'D2'): 165e6,  # After second round search
        }
        for pair in Qubit_pairs:
            qH, qL = pair[0], pair[1]
            self.add_node(f'{qH}, {qL} Chevron',
                          calibrate_function=module_name + '.Chevron_wrapper',
                          calibrate_function_args={
                              'qH': qH,
                              'qL': qL,
                              'station': self.station,
                              # default detuning of 0 unless listed above
                              'qL_det': QL_detunings.get(tuple(pair), 0),
                          })
            for label, wrapper in (
                    ('SNZ tmid', 'SNZ_tmid_wrapper'),
                    ('SNZ AB', 'SNZ_AB_wrapper'),
                    ('Asymmetry', 'Asymmetry_wrapper'),
                    ('1Q phase', 'Single_qubit_phase_calibration_wrapper'),
                    ('2Q IRB', 'TwoQ_Randomized_benchmarking_wrapper')):
                self.add_node(f'{qH}, {qL} {label}',
                              calibrate_function=module_name + '.' + wrapper,
                              calibrate_function_args={
                                  'qH': qH,
                                  'qL': qL,
                                  'station': self.station
                              })
        # Save snapshot
        self.add_node('Save snapshot two-qubit',
                      calibrate_function=module_name + '.save_snapshot_metadata',
                      calibrate_function_args={
                          'station': self.station,
                          'Two_qubit_freq_trajectories': True
                      })

        # Node dependencies
        for qH, qL in Qubit_pairs:
            self.add_edge('Save snapshot two-qubit', f'{qH}, {qL} 2Q IRB')
            self.add_edge(f'{qH}, {qL} 2Q IRB', f'{qH}, {qL} 1Q phase')
            self.add_edge(f'{qH}, {qL} 1Q phase', f'{qH}, {qL} Asymmetry')
            self.add_edge(f'{qH}, {qL} Asymmetry', f'{qH}, {qL} SNZ AB')
            self.add_edge(f'{qH}, {qL} SNZ AB', f'{qH}, {qL} SNZ tmid')
            self.add_edge(f'{qH}, {qL} SNZ tmid', f'{qH}, {qL} Chevron')
            self.add_edge(f'{qH}, {qL} Chevron', f'{qH} Flux arc')
            self.add_edge(f'{qH}, {qL} Chevron', f'{qL} Flux arc')

        # Final dependency: flux arcs wait for the single-qubit snapshot.
        for q in Qubits:
            self.add_edge(f'{q} Flux arc', 'Save snapshot single-qubit')

        ######################################################################
        # Parity checks Graph
        ######################################################################
        stabilizers = ['Z1', 'Z2', 'Z3', 'Z4',
                       # 'X1', 'X2', 'X3', 'X4',
                       ]
        for stab in stabilizers:
            for label, wrapper in (
                    ('Horizontal calibration', 'Horizontal_calibration_wrapper'),
                    ('Ancilla phase verification', 'Measure_parity_check_phase_wrapper'),
                    ('Data-qubit phase correction', 'Data_qubit_phase_calibration_wrapper'),
                    ('Parity assignment fidelity', 'Parity_check_fidelity_wrapper'),
                    ('Parity repeatability', 'Parity_check_repeatability_wrapper')):
                self.add_node(f'{stab} {label}',
                              calibrate_function=module_name + '.' + wrapper,
                              calibrate_function_args={
                                  'stabilizer_qubit': stab,
                                  'station': self.station
                              })
        # Save snapshot
        self.add_node('Save snapshot parity-checks',
                      calibrate_function=module_name + '.save_snapshot_metadata',
                      calibrate_function_args={
                          'station': self.station,
                          'parity_check': True,
                      })
        # Node dependencies
        for stab in stabilizers:
            self.add_edge('Save snapshot parity-checks',
                          f'{stab} Parity repeatability')
            self.add_edge(f'{stab} Parity repeatability',
                          f'{stab} Parity assignment fidelity')
            self.add_edge(f'{stab} Parity assignment fidelity',
                          f'{stab} Data-qubit phase correction')
            self.add_edge(f'{stab} Data-qubit phase correction',
                          f'{stab} Ancilla phase verification')
            self.add_edge(f'{stab} Ancilla phase verification',
                          f'{stab} Horizontal calibration')
            self.add_edge(f'{stab} Horizontal calibration',
                          'Save snapshot two-qubit')

        ######################################################################
        # Create graph
        ######################################################################
        self.cfg_plot_mode = 'svg'
        self.update_monitor()
        self.cfg_svg_filename
        url = self.open_html_viewer()
        print('Dependency graph created at ' + url)


###############################################################################
# Single qubit gate calibration graph
###############################################################################
class Single_qubit_gate_calibration(AutoDepGraph_DAG):
    """Dependency graph covering single-qubit gate calibration only."""

    def __init__(self,
                 name: str,
                 station,
                 **kwargs):
        super().__init__(name, **kwargs)
        self.station = station
self.create_dep_graph() + + def create_dep_graph(self): + ''' + Dependency graph for the calibration of + single-qubit gates. + ''' + print(f'Creating dependency graph for single-qubit gate calibration') + ############################## + # Grah nodes + ############################## + module_name = 'pycqed.instrument_drivers.meta_instrument.Surface17_dependency_graph' + + Qubits = [ + 'D1', 'D2', 'D3', + 'D4', 'D5', 'D6', + 'D7', 'D8', 'D9', + # 'X1', 'X3', 'X4', + 'Z1', 'Z2', 'Z3', 'Z4', + ] + + for qubit in Qubits: + self.add_node(f'{qubit} Prepare for gate calibration', + calibrate_function=module_name+'.prepare_for_single_qubit_gate_calibration', + calibrate_function_args={ + 'qubit' : qubit, + 'station': self.station, + }) + + self.add_node(f'{qubit} Frequency', + calibrate_function=qubit+'.calibrate_frequency_ramsey', + calibrate_function_args={ + 'steps':[3, 10, 30], + 'disable_metadata': True}) + + self.add_node(f'{qubit} Flipping', + calibrate_function=module_name+'.Flipping_wrapper', + calibrate_function_args={ + 'qubit' : qubit, + 'station': self.station, + }) + + self.add_node(f'{qubit} Motzoi', + calibrate_function=module_name+'.Motzoi_wrapper', + calibrate_function_args={ + 'qubit': qubit, + 'station': self.station, + }) + + self.add_node(f'{qubit} AllXY', + calibrate_function=module_name+'.AllXY_wrapper', + calibrate_function_args={ + 'qubit': qubit, + 'station': self.station, + }) + + self.add_node(f'{qubit} Readout', + calibrate_function=module_name+'.SSRO_wrapper', + calibrate_function_args={ + 'qubit': qubit, + 'station': self.station, + }) + + self.add_node(f'{qubit} T1', + calibrate_function=module_name+'.T1_wrapper', + calibrate_function_args={ + 'qubit': qubit, + 'station': self.station, + }) + + self.add_node(f'{qubit} T2', + calibrate_function=module_name+'.T2_wrapper', + calibrate_function_args={ + 'qubit': qubit, + 'station': self.station, + }) + + self.add_node(f'{qubit} Randomized Benchmarking', + 
calibrate_function=module_name+'.Randomized_benchmarking_wrapper', + calibrate_function_args={ + 'qubit': qubit, + 'station': self.station, + }) + + # self.add_node(f'{qubit} drive mixer calibration', + # calibrate_function=module_name+'.drive_mixer_wrapper', + # calibrate_function_args={ + # 'qubit': qubit, + # 'station': self.station, + # }) + + ############################## + # Node depdendencies + ############################## + self.add_edge(f'{qubit} Frequency', + f'{qubit} Prepare for gate calibration') + + self.add_edge(f'{qubit} Flipping', + f'{qubit} Frequency') + + self.add_edge(f'{qubit} Motzoi', + f'{qubit} Frequency') + + self.add_edge(f'{qubit} AllXY', + f'{qubit} Flipping') + + self.add_edge(f'{qubit} AllXY', + f'{qubit} Motzoi') + + self.add_edge(f'{qubit} Randomized Benchmarking', + f'{qubit} AllXY') + + self.add_edge(f'{qubit} Randomized Benchmarking', + f'{qubit} Readout') + + self.add_edge(f'{qubit} Randomized Benchmarking', + f'{qubit} T1') + + self.add_edge(f'{qubit} Randomized Benchmarking', + f'{qubit} T2') + + self.add_node(f'Save snapshot', + calibrate_function=module_name+'.save_snapshot_metadata', + calibrate_function_args={ + 'station': self.station, + }) + for qubit in Qubits: + self.add_edge(f'Save snapshot', + f'{qubit} Randomized Benchmarking') + + ############################## + # Create graph + ############################## + self.cfg_plot_mode = 'svg' + self.update_monitor() + self.cfg_svg_filename + url = self.open_html_viewer() + print('Dependency graph created at ' + url) + + +def prepare_for_single_qubit_gate_calibration(qubit:str, station): + ''' + Initial function to prepare qubit for calibration. + We will set all relevant parameters for mw and readout. + This is such that we only perform full preparation of + the qubit once in the graph and all additional calibrated + parameters are uploaded individually making the whole + procedure time efficient. 
+ ''' + Q_inst = station.components[qubit] + # Set initial parameters for calibration + Q_inst.mw_gauss_width(5e-9) + Q_inst.mw_motzoi(0) + Q_inst.ro_soft_avg(1) + Q_inst.ro_acq_weight_type('optimal') + Q_inst.ro_acq_averages(2**10) + Q_inst.ro_acq_digitized(False) + # Set microwave lutman + Q_lm = Q_inst.instr_LutMan_MW.get_instr() + Q_lm.set_default_lutmap() + # Prepare for timedomain + Q_inst.prepare_for_timedomain() + return True + + +def Flipping_wrapper(qubit:str, station): + ''' + Wrapper function around flipping measurement. + Returns True if successful calibration otherwise + returns False. + ''' + station.components['MC'].live_plot_enabled(False) + station.components['nested_MC'].live_plot_enabled(False) + # Set initial parameters for calibration + Q_inst = station.components[qubit] + Q_inst.ro_soft_avg(1) + Q_inst.ro_acq_weight_type('optimal') + Q_inst.ro_acq_averages(2**10) + # Check if RO pulse has been uploaded onto UHF + # (We do this by checking if the resonator + # combinations of the RO lutman contain + # exclusively this qubit). + RO_lm = Q_inst.instr_LutMan_RO.get_instr() + _res_combs = RO_lm.resonator_combinations() + if _res_combs != [[Q_inst.cfg_qubit_nr()]]: + Q_inst.prepare_readout() + else: + # Just update detector functions (for avg and IQ) + Q_inst._prep_ro_integration_weights() + Q_inst._prep_ro_instantiate_detectors() + # Q_inst.prepare_for_timedomain() + # Run loop of experiments + nr_repetitions = 4 + for i in range(nr_repetitions): + # Prepare for timedomain + # (disable upload of waveforms on + # awg sincethese will always be the + # same if using real-time modulation.) 
+ Q_inst.cfg_prepare_mw_awg(False) + Q_inst._prep_mw_pulses() + Q_inst.cfg_prepare_mw_awg(True) + + # perform measurement + a = Q_inst.measure_flipping( + update=True, + disable_metadata=True, + prepare_for_timedomain=False) + # if amplitude is lower than threshold + if a == True: + return True + return False + + +def Motzoi_wrapper(qubit:str, station): + ''' + Wrapper function around Motzoi measurement. + Returns True if successful calibration otherwise + returns False. + ''' + station.components['MC'].live_plot_enabled(False) + station.components['nested_MC'].live_plot_enabled(False) + # Set initial parameters for calibration + Q_inst = station.components[qubit] + Q_inst.ro_soft_avg(1) + Q_inst.ro_acq_weight_type('optimal') + Q_inst.ro_acq_averages(2**11) + # Check if RO pulse has been uploaded onto UHF + # (We do this by checking if the resonator + # combinations of the RO lutman contain + # exclusively this qubit). + RO_lm = Q_inst.instr_LutMan_RO.get_instr() + _res_combs = RO_lm.resonator_combinations() + if _res_combs != [[Q_inst.cfg_qubit_nr()]]: + Q_inst.prepare_readout() + else: + # Just update detector functions (for avg and IQ) + Q_inst._prep_ro_integration_weights() + Q_inst._prep_ro_instantiate_detectors() + # Prepare for timedomain + Q_inst._prep_mw_pulses() + # perform measurement + _range = .3 + for i in range(4): + outcome = Q_inst.calibrate_motzoi( + update=True, + motzois=np.linspace(-_range/2, _range/2, 5), + disable_metadata=True, + prepare_for_timedomain=False) + # If successfull calibration + if outcome != False: + return True + # if not increase range and try again + else: + _range += .1 + # If not successful after 4 attempts fail node + return False + + +def AllXY_wrapper(qubit:str, station): + ''' + Wrapper function around AllXY measurement. + Returns True if successful calibration otherwise + returns False. 
+ ''' + station.components['MC'].live_plot_enabled(False) + station.components['nested_MC'].live_plot_enabled(False) + # Set initial parameters for calibration + Q_inst = station.components[qubit] + Q_inst.ro_soft_avg(1) + Q_inst.ro_acq_weight_type('optimal') + Q_inst.ro_acq_averages(2**12) + # Check if RO pulse has been uploaded onto UHF + # (We do this by checking if the resonator + # combinations of the RO lutman contain + # exclusively this qubit). + RO_lm = Q_inst.instr_LutMan_RO.get_instr() + _res_combs = RO_lm.resonator_combinations() + if _res_combs != [[Q_inst.cfg_qubit_nr()]]: + Q_inst.prepare_readout() + else: + # Just update detector functions (for avg and IQ) + Q_inst._prep_ro_integration_weights() + Q_inst._prep_ro_instantiate_detectors() + # Set microwave lutman + Q_lm = Q_inst.instr_LutMan_MW.get_instr() + Q_lm.set_default_lutmap() + # Prepare for timedomain + Q_inst._prep_mw_pulses() + out = Q_inst.measure_allxy( + disable_metadata=True, + prepare_for_timedomain=False) + if out > .02: + return False + else: + return True + + +def SSRO_wrapper(qubit:str, station): + ''' + Wrapper function around AllXY measurement. + Returns True if successful calibration otherwise + returns False. + ''' + file_cfg = gc.generate_config(in_filename=input_file, + out_filename=config_fn, + mw_pulse_duration=20, + ro_duration=600, + flux_pulse_duration=40, + init_duration=200000) + station.components['MC'].live_plot_enabled(False) + station.components['nested_MC'].live_plot_enabled(False) + # Set initial parameters for calibration + Q_inst = station.components[qubit] + Q_inst.ro_soft_avg(1) + Q_inst.ro_acq_weight_type('optimal IQ') + Q_inst.ro_acq_digitized(False) + Q_inst.ro_acq_averages(2**10) # Not used in this experiment + # Check if RO pulse has been uploaded onto UHF + # (We do this by checking if the resonator + # combinations of the RO lutman contain + # exclusively this qubit). 
+ RO_lm = Q_inst.instr_LutMan_RO.get_instr() + _res_combs = RO_lm.resonator_combinations() + if _res_combs != [[Q_inst.cfg_qubit_nr()]]: + Q_inst.prepare_readout() + else: + # Just update detector functions (for avg and IQ) + Q_inst._prep_ro_integration_weights() + Q_inst._prep_ro_instantiate_detectors() + # Set microwave lutman + Q_lm = Q_inst.instr_LutMan_MW.get_instr() + Q_lm.set_default_lutmap() + # Prepare for timedomain + Q_inst._prep_td_sources() + Q_inst._prep_mw_pulses() + Q_inst.measure_ssro( + f_state=True, + post_select=True, + nr_shots_per_case=2**15, + disable_metadata=True, + prepare=False) + return True + + +def T1_wrapper(qubit:str, station): + ''' + Wrapper function around AllXY measurement. + Returns True if successful calibration otherwise + returns False. + ''' + station.components['MC'].live_plot_enabled(False) + station.components['nested_MC'].live_plot_enabled(False) + # Set initial parameters for calibration + Q_inst = station.components[qubit] + Q_inst.ro_soft_avg(1) + Q_inst.ro_acq_weight_type('optimal IQ') + Q_inst.ro_acq_digitized(False) + Q_inst.ro_acq_averages(2**9) + # Check if RO pulse has been uploaded onto UHF + # (We do this by checking if the resonator + # combinations of the RO lutman contain + # exclusively this qubit). + RO_lm = Q_inst.instr_LutMan_RO.get_instr() + _res_combs = RO_lm.resonator_combinations() + if _res_combs != [[Q_inst.cfg_qubit_nr()]]: + Q_inst.prepare_readout() + else: + # Just update detector functions (for avg and IQ) + Q_inst._prep_ro_integration_weights() + Q_inst._prep_ro_instantiate_detectors() + # measure + Q_inst.measure_T1( + disable_metadata=True, + prepare_for_timedomain=True) + return True + + +def T2_wrapper(qubit:str, station): + ''' + Wrapper function around AllXY measurement. + Returns True if successful calibration otherwise + returns False. 
+ ''' + station.components['MC'].live_plot_enabled(False) + station.components['nested_MC'].live_plot_enabled(False) + # Set initial parameters for calibration + Q_inst = station.components[qubit] + Q_inst.ro_soft_avg(1) + Q_inst.ro_acq_weight_type('optimal IQ') + Q_inst.ro_acq_digitized(False) + Q_inst.ro_acq_averages(2**9) + # Check if RO pulse has been uploaded onto UHF + # (We do this by checking if the resonator + # combinations of the RO lutman contain + # exclusively this qubit). + RO_lm = Q_inst.instr_LutMan_RO.get_instr() + _res_combs = RO_lm.resonator_combinations() + if _res_combs != [[Q_inst.cfg_qubit_nr()]]: + Q_inst.prepare_readout() + else: + # Just update detector functions (for avg and IQ) + Q_inst._prep_ro_integration_weights() + Q_inst._prep_ro_instantiate_detectors() + # measure + Q_inst.measure_echo( + disable_metadata=True, + prepare_for_timedomain=False) + return True + + +def Randomized_benchmarking_wrapper(qubit:str, station): + ''' + Wrapper function around Randomized benchmarking. + Returns True if successful calibration otherwise + returns False. + ''' + station.components['MC'].live_plot_enabled(False) + station.components['nested_MC'].live_plot_enabled(False) + # Set initial parameters for calibration + Q_inst = station.components[qubit] + Q_inst.ro_soft_avg(1) + Q_inst.ro_acq_weight_type('optimal IQ') + Q_inst.ro_acq_averages(2**10) # Not used in RB + # Check if RO pulse has been uploaded onto UHF + # (We do this by checking if the resonator + # combinations of the RO lutman contain + # exclusively this qubit). 
+ RO_lm = Q_inst.instr_LutMan_RO.get_instr() + _res_combs = RO_lm.resonator_combinations() + if _res_combs != [[Q_inst.cfg_qubit_nr()]]: + Q_inst.prepare_readout() + else: + # Just update detector functions (for avg and IQ) + Q_inst._prep_ro_integration_weights() + Q_inst._prep_ro_instantiate_detectors() + # Set microwave lutman + Q_lm = Q_inst.instr_LutMan_MW.get_instr() + Q_lm.set_default_lutmap() + # Prepare for timedomain + Q_inst._prep_td_sources() + Q_inst._prep_mw_pulses() + # measurement + Q_inst.measure_single_qubit_randomized_benchmarking( + nr_cliffords=2**np.arange(11), + nr_seeds=15, + recompile=False, + prepare_for_timedomain=False, + disable_metadata=False) + return True + + +def drive_mixer_wrapper(qubit:str, station): + ''' + Wrapper function for drive mixer calibration. + Returns True if successful calibration otherwise + returns False. + ''' + Q_inst = station.components[qubit] + SH = Q_inst.instr_SH.get_instr() + connect(qubit) + # Set initial parameters for calibration + Q_inst.ro_soft_avg(1) + # Set default microwave lutman + Q_lm = Q_inst.instr_LutMan_MW.get_instr() + Q_lm.set_default_lutmap() + # Setup Signal hound for leakage + SH.ref_lvl(-40) + SH.rbw(1e3) + SH.vbw(1e3) + # Measure leakage + Q_inst.calibrate_mixer_offsets_drive( + update=True, + ftarget=-105) + # Setup Signal hound for skewness + SH.ref_lvl(-60) + SH.rbw(1e3) + SH.vbw(1e3) + # Measure skewness + Q_inst.calibrate_mixer_skewness_drive( + update=True, + maxfevals=120) + return True + + +############################################################################### +# Two qubit gate calibration graph +############################################################################### +import os +import pycqed as pq +from pycqed.measurement.openql_experiments import generate_CC_cfg as gc +input_file = os.path.join(pq.__path__[0], 'measurement', + 'openql_experiments', 'config_cc_s17_direct_iq.json.in') +config_fn = os.path.join(pq.__path__[0], 'measurement', + 
'openql_experiments', 'output_cc_s17','config_cc_s17_direct_iq.json') + +TWOQ_GATE_DURATION = 60e-9 +TWOQ_GATE_DURATION_NS = 60 + +OFFSET_QUBITS = [] # ['X2', 'X3', 'X4', 'D7', 'D9'] + +class Two_qubit_gate_calibration(AutoDepGraph_DAG): + def __init__(self, + name: str, + station, + Qubit_pairs: list = None, + **kwargs): + super().__init__(name, **kwargs) + if Qubit_pairs == None: + Qubit_pairs = [ + ['Z3', 'D7'], + ['D5', 'Z1'], + ['Z4', 'D9'], + ['Z1', 'D2'], + ['D4', 'Z3'], + ['D6', 'Z4'], + ['Z4', 'D8'], + ['D4', 'Z1'], + ['D6', 'Z2'], + ['Z2', 'D3'], + ['Z1', 'D1'], + ['D5', 'Z4'], + # ['X1', 'D2'], + # ['D6', 'X2'], + # ['X3', 'D8'], + # ['X1', 'D1'], + # ['D5', 'X2'], + # ['X3', 'D7'], + # ['X4', 'D9'], + # ['D5', 'X3'], + # ['X2', 'D3'], + # ['X4', 'D8'], + # ['D4', 'X3'], + # ['X2', 'D2'], + ] + self.station = station + self.create_dep_graph(Qubit_pairs=Qubit_pairs) + + def create_dep_graph(self, Qubit_pairs:list): + ''' + Dependency graph for the calibration of + single-qubit gates. 
+ ''' + print(f'Creating dependency graph for two-qubit gate calibration') + ############################## + # Grah nodes + ############################## + module_name = 'pycqed.instrument_drivers.meta_instrument.Surface17_dependency_graph' + + + # Single-qubit nodes + Qubits = np.unique(np.array(Qubit_pairs).flatten()) + for q in Qubits: + self.add_node(f'{q} Flux arc', + calibrate_function=module_name+'.Flux_arc_wrapper', + calibrate_function_args={ + 'Qubit' : q, + 'station': self.station, + }) + # Two-qubit nodes + QL_detunings = { + ('Z1', 'D2') : 250e6,#400e6, + ('Z1', 'D1') : 400e6, + ('Z4', 'D8') : 100e6, + # ('Z4', 'D9') : 100e6, + ('X3', 'D7') : 100e6, + ('X3', 'D8') : 100e6, + } + for pair in Qubit_pairs: + self.add_node(f'{pair[0]}, {pair[1]} Chevron', + calibrate_function=module_name+'.Chevron_wrapper', + calibrate_function_args={ + 'qH' : pair[0], + 'qL' : pair[1], + 'station': self.station, + 'qL_det': QL_detunings[tuple(pair)] \ + if tuple(pair) in QL_detunings.keys() else 0 + }) + + self.add_node(f'{pair[0]}, {pair[1]} SNZ tmid', + calibrate_function=module_name+'.SNZ_tmid_wrapper', + calibrate_function_args={ + 'qH' : pair[0], + 'qL' : pair[1], + 'station': self.station + }) + + self.add_node(f'{pair[0]}, {pair[1]} SNZ AB', + calibrate_function=module_name+'.SNZ_AB_wrapper', + calibrate_function_args={ + 'qH' : pair[0], + 'qL' : pair[1], + 'station': self.station + }) + + self.add_node(f'{pair[0]}, {pair[1]} Asymmetry', + calibrate_function=module_name+'.Asymmetry_wrapper', + calibrate_function_args={ + 'qH' : pair[0], + 'qL' : pair[1], + 'station': self.station + }) + + self.add_node(f'{pair[0]}, {pair[1]} 1Q phase', + calibrate_function=module_name+'.Single_qubit_phase_calibration_wrapper', + calibrate_function_args={ + 'qH' : pair[0], + 'qL' : pair[1], + 'station': self.station + }) + + self.add_node(f'{pair[0]}, {pair[1]} 2Q IRB', + calibrate_function=module_name+'.TwoQ_Randomized_benchmarking_wrapper', + calibrate_function_args={ + 'qH' : 
pair[0], + 'qL' : pair[1], + 'station': self.station + }) + + # Save snpashot + self.add_node('Save snapshot', + calibrate_function=module_name+'.save_snapshot_metadata', + calibrate_function_args={ + 'station': self.station, + }) + + ############################## + # Node depdendencies + ############################## + for Q_pair in Qubit_pairs: + self.add_edge('Save snapshot', + f'{Q_pair[0]}, {Q_pair[1]} 2Q IRB') + + self.add_edge(f'{Q_pair[0]}, {Q_pair[1]} 2Q IRB', + f'{Q_pair[0]}, {Q_pair[1]} 1Q phase') + + self.add_edge(f'{Q_pair[0]}, {Q_pair[1]} 1Q phase', + f'{Q_pair[0]}, {Q_pair[1]} Asymmetry') + + self.add_edge(f'{Q_pair[0]}, {Q_pair[1]} Asymmetry', + f'{Q_pair[0]}, {Q_pair[1]} SNZ AB') + + self.add_edge(f'{Q_pair[0]}, {Q_pair[1]} SNZ AB', + f'{Q_pair[0]}, {Q_pair[1]} SNZ tmid') + + self.add_edge(f'{Q_pair[0]}, {Q_pair[1]} SNZ tmid', + f'{Q_pair[0]}, {Q_pair[1]} Chevron') + + self.add_edge(f'{Q_pair[0]}, {Q_pair[1]} Chevron', + f'{Q_pair[0]} Flux arc') + self.add_edge(f'{Q_pair[0]}, {Q_pair[1]} Chevron', + f'{Q_pair[1]} Flux arc') + + ############################## + # Create graph + ############################## + self.cfg_plot_mode = 'svg' + self.update_monitor() + self.cfg_svg_filename + url = self.open_html_viewer() + print('Dependency graph created at ' + url) + + +def Cryoscope_wrapper(Qubit, station, detuning=None, + update_IIRs=False, + update_FIRs=False, + max_duration: float = 100e-9, **kw): + ''' + Wrapper function for measurement of Cryoscope. + This will update the required polynomial coeficients + for detuning to voltage conversion. 
+ ''' + # Set gate duration + flux_duration_ns = int(max_duration*1e9) + 100 + # flux_duration_ns = int(max_duration*1e9) + file_cfg = gc.generate_config(in_filename=input_file, + out_filename=config_fn, + mw_pulse_duration=20, + ro_duration=1000, + flux_pulse_duration=flux_duration_ns, + init_duration=200000) + if 'live_plot_enabled' in kw.keys(): + _live_plot = kw['live_plot_enabled'] + else: + _live_plot = False + station.components['MC'].live_plot_enabled(_live_plot) + station.components['nested_MC'].live_plot_enabled(_live_plot) + # Setup measurement + Q_inst = station.components[Qubit] + # Q_inst.prepare_readout() + # Set microwave lutman + Q_mlm = Q_inst.instr_LutMan_MW.get_instr() + Q_mlm.set_default_lutmap() + # Q_mlm.load_waveforms_onto_AWG_lookuptable(regenerate_waveforms=True) + # Set flux lutman + Q_flm = Q_inst.instr_LutMan_Flux.get_instr() + if max_duration > TWOQ_GATE_DURATION: + Q_flm.cfg_max_wf_length(max_duration) + Q_flm.AWG.get_instr().reset_waveforms_zeros() + Q_flm.load_waveforms_onto_AWG_lookuptable(regenerate_waveforms=True) + Q_inst.prepare_for_timedomain() + # Find amplitudes corresponding to specified frequency detunings + # if there are existing polycoefs, try points at specified detunings + if detuning == None: + if Qubit in ['D4', 'D5', 'D6']: + detuning = 600e6 + else: + detuning = 900e6 + # TODO: Commented out because we want to start from default condition + if all(Q_flm.q_polycoeffs_freq_01_det() != None): + sq_amp = get_DAC_amp_frequency(detuning, Q_flm, + negative_amp=True if Qubit in OFFSET_QUBITS else False) + else: + sq_amp = .5 + # sq_amp = .5 + if 'sq_amp' in kw: + sq_amp = kw['sq_amp'] + + Q_flm.sq_amp(sq_amp) + if sq_amp < 0: + print('Using negative amp') + device = station.components['device'] + if 'ro_acq_averages' in kw.keys(): + avg = kw['ro_acq_averages'] + else: + avg = 2**10 + device.ro_acq_averages(avg) + device.ro_acq_weight_type('optimal') + device.measure_cryoscope( + qubits=[Qubit], + times = np.arange(0e-9, 
max_duration, 1/2.4e9), + wait_time_flux = 40, + update_FIRs = update_FIRs, + update_IIRs = update_IIRs) + # Reset wavform duration + if max_duration > TWOQ_GATE_DURATION: + Q_flm.cfg_max_wf_length(TWOQ_GATE_DURATION) + Q_flm.AWG.get_instr().reset_waveforms_zeros() + return True + + +def Flux_arc_wrapper(Qubit, station, + Detunings: list = None, + fix_zero_detuning:bool=True, + Amps = None, + repetitions = 2**10): + ''' + Wrapper function for measurement of flux arcs. + This will update the required polynomial coeficients + for detuning to voltage conversion. + ''' + # Set gate duration + TQG_duration_ns = TWOQ_GATE_DURATION_NS + file_cfg = gc.generate_config(in_filename=input_file, + out_filename=config_fn, + mw_pulse_duration=20, + ro_duration=1000, + flux_pulse_duration=TQG_duration_ns, + init_duration=200000) + station.components['MC'].live_plot_enabled(False) + station.components['nested_MC'].live_plot_enabled(False) + # Setup measurement + Q_inst = station.components[Qubit] + Q_inst.ro_acq_averages(repetitions) + Q_inst.ro_acq_weight_type('optimal') + # Q_inst.prepare_readout() + # Set microwave lutman + Q_mlm = Q_inst.instr_LutMan_MW.get_instr() + Q_mlm.set_default_lutmap() + # Q_mlm.load_waveforms_onto_AWG_lookuptable(regenerate_waveforms=True) + # Set flux lutman + Q_flm = Q_inst.instr_LutMan_Flux.get_instr() + check_flux_wf_duration(Q_flm) # (legacy) + Q_flm.load_waveforms_onto_AWG_lookuptable(regenerate_waveforms=True) + Q_inst.prepare_for_timedomain() + # Find amplitudes corresponding to specified frequency detunings + # if there are existing polycoefs, try points at specified detunings + if Detunings is None: + if Qubit in ['D4', 'D5', 'D6']: + Detunings = [600e6, 400e6, 200e6] + else: + Detunings = [900e6, 700e6, 500e6] + + # TODO: Commented out because we want to start from default condition + if Amps is None: + if all(Q_flm.q_polycoeffs_freq_01_det() != None): + # Amps = [ 0, 0, 0, 0, ] + Amps = [ 0, 0, 0, 0, 0, 0] + # To avoid updates to the 
channel gain during this step + # we calculate all the amplitudes before setting them + for det in Detunings: + get_DAC_amp_frequency(det, Q_flm, negative_amp=True) + get_DAC_amp_frequency(det, Q_flm) + for j, det in enumerate(Detunings): + Amps[j] = get_DAC_amp_frequency(det, Q_flm, negative_amp=True) + Amps[-(j+1)] = get_DAC_amp_frequency(det, Q_flm) + # If not, try some random amplitudes + # else: + # Amps = [-0.4, -0.35, -0.3, 0.3, 0.35, 0.4] + + + + # Measure flux arc + for i in range(2): + print(Amps) + a = Q_inst.calibrate_flux_arc( + Amplitudes=Amps, + Times = np.arange(40e-9, 60e-9, 1/2.4e9), + update=True, + disable_metadata=True, + prepare_for_timedomain=False, + fix_zero_detuning=fix_zero_detuning) + max_freq = np.max(a.proc_data_dict['Freqs']) + # If flux arc spans 750 MHz + if max_freq>np.max(Detunings)-150e6: + return True + # Expand scan range to include higher frequency + else: + for j, det in enumerate(Detunings): + sq_amp = get_DAC_amp_frequency(det, Q_flm) + Amps[j] = -sq_amp + Amps[-(j+1)] = sq_amp + # If not successful after 3 attempts fail node + return False + + +def Chevron_wrapper(qH, qL, station, + avoided_crossing: str = '11-02', + qL_det: float = 0, + park_distance: float = 700e6, + negative_amp: bool = False, + use_premeasured_values: bool = True, + **kw): + ''' + Wrapper function for measurement of Chevrons. + Using voltage to detuning information, we predict the + amplitude of the interaction for the desired avoided + crossing and measure a chevron within frequency range. + Args: + qH: High frequency qubit. + qL: Low frequency qubit. + avoided crossing: "11-02" or "11-20" + (in ascending detuning order) + qL_det: Detuning of low frequency qubit. This + feature is used to avoid spurious TLSs. + park_distance: Minimum (frequency) distance of + parked qubits to low-frequency + qubit. 
+ ''' + # Set gate duration + TQG_duration_ns = TWOQ_GATE_DURATION_NS + file_cfg = gc.generate_config(in_filename=input_file, + out_filename=config_fn, + mw_pulse_duration=20, + ro_duration=1000, + flux_pulse_duration=TQG_duration_ns, + init_duration=200000) + if 'live_plot_enabled' in kw.keys(): + _live_plot = kw['live_plot_enabled'] + else: + _live_plot = False + station.components['MC'].live_plot_enabled(_live_plot) + station.components['nested_MC'].live_plot_enabled(_live_plot) + # Setup for measurement + device = station.components['device'] + device.ro_acq_weight_type('optimal') + device.ro_acq_averages(2**9) + # Perform measurement of 11_02 avoided crossing + Q_H = station.components[qH] + Q_L = station.components[qL] + flux_lm_H = Q_H.instr_LutMan_Flux.get_instr() + flux_lm_L = Q_L.instr_LutMan_Flux.get_instr() + # For qubits off the sweet-spot, the amplitude should be negative + if qH in OFFSET_QUBITS: + negative_amp = True + if negative_amp: + flux_lm_H.sq_amp(-.5) + else: + flux_lm_H.sq_amp(.5) + flux_lm_H.sq_delay(0) + # Set frequency of low frequency qubit + if abs(qL_det) < 10e6: + sq_amp_L = 0 # avoids error near 0 in the flux arc. 
+ else: + dircts = get_gate_directions(qH, qL) + flux_lm_L.set(f'q_freq_10_{dircts[1]}', qL_det) + sq_amp_L = get_DAC_amp_frequency(qL_det, flux_lm_L, + negative_amp=True if qL in OFFSET_QUBITS else False) + flux_lm_L.sq_amp(sq_amp_L) + flux_lm_L.sq_length(60e-9) + flux_lm_L.sq_delay(0) + # for lm in [flux_lm_H, flux_lm_L]: + # load_single_waveform_on_HDAWG(lm, wave_id='square') + device.prepare_fluxing(qubits = [qH, qL]) + # Set frequency of parked qubits + park_freq = Q_L.freq_qubit()-qL_det-park_distance + for q in get_parking_qubits(qH, qL): + Q_inst = station.components[q] + flux_lm_p = Q_inst.instr_LutMan_Flux.get_instr() + park_det = Q_inst.freq_qubit()-park_freq + # Only park if the qubit is closer than then 350 MHz + if park_det>20e6: + sq_amp_park = get_DAC_amp_frequency(park_det, flux_lm_p) + flux_lm_p.sq_amp(sq_amp_park) + else: + flux_lm_p.sq_amp(0) + flux_lm_p.sq_length(60e-9) + flux_lm_p.sq_delay(0) + load_single_waveform_on_HDAWG(flux_lm_p, wave_id='square') + # Estimate avoided crossing amplitudes + f_H, a_H = Q_H.freq_qubit(), Q_H.anharmonicity() + f_L, a_L = Q_L.freq_qubit(), Q_L.anharmonicity() + detuning_11_02, detuning_11_20 = \ + calculate_avoided_crossing_detuning(f_H, f_L, a_H, a_L) + # Estimating scan ranges based on frequency range + if 'scan_range' not in kw: + scan_range = 200e6 + else: + scan_range = kw['scan_range'] + if avoided_crossing == '11-02': + _det = detuning_11_02 + elif avoided_crossing == '11-20': + _det = detuning_11_20 + A_range = [] + for r in [-scan_range/2, scan_range/2]: + _ch_amp = get_Ch_amp_frequency(_det+r+qL_det, flux_lm_H) + A_range.append(_ch_amp) + # Known values (if these are given, + # this measurement will be skipped) + Qubit_pair_Tp = { + ('Z3', 'D7'): 2.125e-08, + ('D5', 'Z1'): 1.875e-08, + ('Z4', 'D9'): 2.2083333333333333e-08, + ('D4', 'Z3'): 1.75e-08, + ('D6', 'Z4'): 1.875e-08, + ('Z1', 'D2'): 2.4166666666666668e-08, + ('D4', 'Z1'): 1.75e-08, + ('D6', 'Z2'): 1.75e-08, + ('Z4', 'D8'): 
2.0833333333333335e-08, + ('Z2', 'D3'): 2.1666666666666665e-08+1/2.4e9, + ('Z1', 'D1'): 54 / 2.4e9, + ('D5', 'Z4'): 1.875e-08, + ('X1', 'D1'): 2.0833333333333335e-08, # (48 sampling points) -> Go to 50 sampling points (2.0833333333333335e-08) + ('X1', 'D2'): 2.2083333333333333e-08+2/2.4e9, + ('D5', 'X2'): 1.875e-08-2/2.4e9, + ('D6', 'X2'): 1.9583333333333333e-08-1/2.4e9, + ('D4', 'X3'): 2.2083333333333333e-08, + ('D5', 'X3'): 2.0416666666666668e-08, + ('X2', 'D2'): 2.0833333333333335e-08-1/2.4e9, # Increased Tp time, because of undershoot + ('X2', 'D3'): 1.9583333333333333e-08-1/2.4e9, + ('X3', 'D7'): 2.0416666666666668e-08-1/2.4e9, + ('X3', 'D8'): 2.1666666666666665e-08-1/2.4e9, + ('X4', 'D8'): 2.0833333333333335e-08, + ('X4', 'D9'): 1.9583333333333333e-08, + } + # Run measurement + # !PROBLEM! prepare for readout is not enough + # for wtv reason, need to look into this! + # device.prepare_readout(qubits=[qH, qL]) + device.prepare_for_timedomain(qubits=[qH, qL], bypass_flux=False) + park_qubits = get_parking_qubits(qH, qL)+[qL] + device.measure_chevron( + q0=qH, + q_spec=qL, + amps=np.linspace(A_range[0], A_range[1], 21), + q_parks=park_qubits, + lengths=np.linspace(10, 60, 21) * 1e-9, + target_qubit_sequence='excited', + waveform_name="square", + prepare_for_timedomain=False, + disable_metadata=True, + ) + # run analysis + a = ma2.tqg.Chevron_Analysis( + QH_freq=Q_H.freq_qubit(), + QL_det=qL_det, + avoided_crossing=avoided_crossing, + Out_range=flux_lm_H.cfg_awg_channel_range(), + DAC_amp=flux_lm_H.sq_amp(), + Poly_coefs=flux_lm_H.q_polycoeffs_freq_01_det()) + if ((qH, qL) in Qubit_pair_Tp.keys()) and use_premeasured_values: + # Hardcoding optimal TPs + print('Using pre-measured optimal values') + # Update flux lutman parameters + dircts = get_gate_directions(qH, qL) + flux_lm_H.set(f'vcz_time_single_sq_{dircts[0]}', Qubit_pair_Tp[(qH, qL)]) + flux_lm_L.set(f'vcz_time_single_sq_{dircts[1]}', Qubit_pair_Tp[(qH, qL)]) + else: + # Update flux lutman parameters + 
dircts = get_gate_directions(qH, qL) + # tp of SNZ + tp = a.qoi['Tp'] + tp_dig = np.ceil((tp/2)*2.4e9)*2/2.4e9 + print('Find fitting value:', tp_dig/2, 's') + # tp_dig += 2*2/2.4e9 # To prevent too short SNZ cases + if [qH, qL] in [['Z1', 'D1'], ['Z2', 'D3'], ['D4', 'X3']]: + tp_dig += 2*2/2.4e9 # this should be removed later + if [qH, qL] in [['Z3', 'D7']]: + tp_dig += 3*2/2.4e9 # this should be removed later + if [qH, qL] in [['Z1', 'D2']]: + tp_dig += 1*2/2.4e9 # this should be removed later + if qL_det > 200e6: + tp_dig += 8/2.4e9 + flux_lm_H.set(f'vcz_time_single_sq_{dircts[0]}', tp_dig/2) + flux_lm_L.set(f'vcz_time_single_sq_{dircts[1]}', tp_dig/2) + print('Setting tp/2 to', flux_lm_H.get(f'vcz_time_single_sq_{dircts[0]}'), 's') + # detuning frequency of interaction + flux_lm_H.set(f'q_freq_10_{dircts[0]}', a.qoi['detuning_freq']) + flux_lm_L.set(f'q_freq_10_{dircts[1]}', qL_det) + return True + + +def SNZ_tmid_wrapper(qH, qL, station, + park_distance: float = 700e6, + apply_parking_settings: bool = True, + asymmetry_compensation: bool = False, + tmid_offset_samples: int = 0, + **kw): + ''' + Wrapper function for measurement of of SNZ landscape. + Using voltage to detuning information, we set the + amplitude of the interaction based on previous updated + values of qubit detunings (q_freq_10_) from + Chevron measurement. + Args: + qH: High frequency qubit. + qL: Low frequency qubit. 
+ park_distance: Minimum (frequency) distance of + parked qubits to low-frequency + ''' + # Set gate duration + TQG_duration_ns = TWOQ_GATE_DURATION_NS - 20 + file_cfg = gc.generate_config(in_filename=input_file, + out_filename=config_fn, + mw_pulse_duration=20, + ro_duration=1000, + flux_pulse_duration=TQG_duration_ns, + init_duration=200000) + if 'live_plot_enabled' in kw.keys(): + _live_plot = kw['live_plot_enabled'] + else: + _live_plot = False + station.components['MC'].live_plot_enabled(_live_plot) + station.components['nested_MC'].live_plot_enabled(_live_plot) + # Setup for measurement + dircts = get_gate_directions(qH, qL) + Q_H = station.components[qH] + Q_L = station.components[qL] + flux_lm_H = Q_H.instr_LutMan_Flux.get_instr() + flux_lm_L = Q_L.instr_LutMan_Flux.get_instr() + flux_lm_H.set(f'vcz_amp_sq_{dircts[0]}', 1) + flux_lm_H.set(f'vcz_amp_fine_{dircts[0]}', 0.5) + flux_lm_H.set(f'vcz_amp_dac_at_11_02_{dircts[0]}', 0.5) + # For qubits off the sweet-spot, the amplitude should be negative + if qH in OFFSET_QUBITS: + flux_lm_H.set(f'vcz_amp_dac_at_11_02_{dircts[0]}', -0.5) + # Set frequency of low frequency qubit + qL_det = flux_lm_L.get(f'q_freq_10_{dircts[1]}') # detuning at gate + if abs(qL_det) < 10e6: + sq_amp_L = 0 # avoids error near 0 in the flux arc. 
+ else: + sq_amp_L = get_DAC_amp_frequency(qL_det, flux_lm_L, + negative_amp=True if qL in OFFSET_QUBITS else False) + flux_lm_L.set(f'vcz_amp_sq_{dircts[1]}', 1) + flux_lm_L.set(f'vcz_amp_fine_{dircts[1]}', 0) + flux_lm_L.set(f'vcz_amp_dac_at_11_02_{dircts[1]}', sq_amp_L) + # Check waveform durations + check_flux_wf_duration(flux_lm_H) + check_flux_wf_duration(flux_lm_L) + # Set frequency of parked qubits + Parked_qubits = get_parking_qubits(qH, qL) + if apply_parking_settings: + park_freq = Q_L.freq_qubit()-qL_det-park_distance + for q in Parked_qubits: + Q_inst = station.components[q] + flux_lm_p = Q_inst.instr_LutMan_Flux.get_instr() + park_det = Q_inst.freq_qubit()-park_freq + # Only park if the qubit is closer than + if park_det>10e6: + amp_park_pos = get_DAC_amp_frequency(park_det, flux_lm_p) + amp_park_neg = get_DAC_amp_frequency(park_det, flux_lm_p, + negative_amp=True) + _Amps = [amp_park_pos, amp_park_neg] + amp_park_idx = np.argmax(np.abs(_Amps)) + # Update parking amplitude in lookup table + flux_lm_p.park_amp(_Amps[amp_park_idx]) + else: + flux_lm_p.park_amp(0) + # Check wf duration of park qubits + check_flux_wf_duration(flux_lm_p) + # Estimating scan ranges based on frequency range + if 'scan_range' not in kw: + scan_range = 40e6 + else: + scan_range = kw['scan_range'] + + if 'A_points' not in kw: + A_points = 11 + else: + A_points = kw['A_points'] + _det = flux_lm_H.get(f'q_freq_10_{dircts[0]}') # detuning at gate + # Predict required gate asymetry + if asymmetry_compensation: + # We use the sq_amp to calculate positive and negative amps for the pulse. + # (vcz_amp_dac_at_11_02 does not allow negative values). 
+ _amp = flux_lm_H.get(f'vcz_amp_dac_at_11_02_{dircts[0]}') + flux_lm_H.sq_amp(+_amp) + gain_high = get_Ch_amp_frequency(_det, flux_lm_H, DAC_param='sq_amp') + flux_lm_H.sq_amp(-_amp) + gain_low = get_Ch_amp_frequency(_det, flux_lm_H, DAC_param='sq_amp') + gain = (gain_high+gain_low)/2 + asymmetry = (gain_high-gain_low)/(gain_high+gain_low) + flux_lm_H.set(f'vcz_use_asymmetric_amp_{dircts[0]}', True) + flux_lm_H.set(f'vcz_asymmetry_{dircts[0]}', asymmetry) + # flux_lm_H.set(f'cfg_awg_channel_amplitude', gain) + # Set new detunning corresponding to average gain + ch_amp_0 = get_Ch_amp_frequency(_det, flux_lm_H, DAC_param=f'sq_amp') + delta_ch_amp_p = get_Ch_amp_frequency(_det+scan_range/2, flux_lm_H, DAC_param=f'sq_amp') - ch_amp_0 + delta_ch_amp_m = get_Ch_amp_frequency(_det-scan_range/2, flux_lm_H, DAC_param=f'sq_amp') - ch_amp_0 + A_range = [gain+delta_ch_amp_m, gain+delta_ch_amp_p] + # Predict range without asymmetry + else: + A_range = [] + for r in [-scan_range/2, scan_range/2]: + _ch_amp = get_Ch_amp_frequency(_det+r, flux_lm_H, + DAC_param=f'vcz_amp_dac_at_11_02_{dircts[0]}') + A_range.append(_ch_amp) + # Assess if unipolar pulse is required + # if qH in OFFSET_QUBITS: + # flux_lm_H.set(f'vcz_use_net_zero_pulse_{dircts[0]}', False) + # # if working with asymmetric pulses + # if asymmetry_compensation: + # flux_lm_H.set(f'vcz_use_net_zero_pulse_{dircts[0]}', True) + # else: + # flux_lm_H.set(f'vcz_use_net_zero_pulse_{dircts[0]}', False) + # # Setting pading amplitude to ensure net-zero waveform + # make_unipolar_pulse_net_zero(flux_lm_H, f'cz_{dircts[0]}') + # if tmid_offset_samples == 0: + # tmid_offset_samples = 1 + # Perform measurement of 11_02 avoided crossing + device = station['device'] + device.ro_acq_averages(2**8) + device.ro_acq_weight_type('optimal') + device.prepare_for_timedomain(qubits=[qH, qL], bypass_flux=True) + device.prepare_fluxing(qubits=[qH, qL]+Parked_qubits) + device.measure_vcz_A_tmid_landscape( + Q0 = [qH], + Q1 = [qL], + T_mids = 
np.arange(10) + tmid_offset_samples, + A_ranges = [A_range], + A_points = A_points, + Q_parks = Parked_qubits, + flux_codeword = 'cz', + flux_pulse_duration = TWOQ_GATE_DURATION, + prepare_for_timedomain=False, + disable_metadata=True) + a = ma2.tqg.VCZ_tmid_Analysis(Q0=[qH], Q1=[qL], + A_ranges=[A_range], + Poly_coefs = [flux_lm_H.q_polycoeffs_freq_01_det()], + DAC_amp = flux_lm_H.get(f'vcz_amp_dac_at_11_02_{dircts[0]}'), + Out_range = flux_lm_H.cfg_awg_channel_range(), + Q0_freq = Q_H.freq_qubit(), + asymmetry = flux_lm_H.get(f'vcz_asymmetry_{dircts[0]}')\ + if asymmetry_compensation else 0, + label=f'VCZ_Amp_vs_Tmid_{[qH]}_{[qL]}_{Parked_qubits}') + opt_det, opt_tmid = a.qoi['opt_params_0'] + # Set new interaction frequency + flux_lm_H.set(f'q_freq_10_{dircts[0]}', opt_det) + # round tmid to th sampling point + opt_tmid = np.round(opt_tmid) + # Set optimal timing SNZ parameters + Flux_lm_ps = [ device.find_instrument(q).instr_LutMan_Flux.get_instr()\ + for q in Parked_qubits ] + tmid_swf = swf.flux_t_middle_sweep( + fl_lm_tm = [flux_lm_H, flux_lm_L], + fl_lm_park = Flux_lm_ps, + which_gate = list(dircts), + duration=TWOQ_GATE_DURATION, + time_park=TWOQ_GATE_DURATION-(6/2.4e9), + t_pulse = [flux_lm_H.get(f'vcz_time_single_sq_{dircts[0]}')*2]) + tmid_swf.set_parameter(opt_tmid) + return True + + +def SNZ_AB_wrapper(qH, qL, station, + park_distance: float = 700e6, + apply_parking_settings: bool = True, + asymmetry_compensation: bool = False, + flux_cw: str = 'cz', + **kw): + ''' + Wrapper function for measurement of of SNZ landscape. + Using voltage to detuning information, we set the + amplitude of the interaction based on previous updated + values of qubit detunings (q_freq_10_) from + Chevron measurement. + Args: + qH: High frequency qubit. + qL: Low frequency qubit. 
+ park_distance: Minimum (frequency) distance of + parked qubits to low-frequency + ''' + # Set gate duration + TQG_duration_ns = TWOQ_GATE_DURATION_NS - 20 + file_cfg = gc.generate_config(in_filename=input_file, + out_filename=config_fn, + mw_pulse_duration=20, + ro_duration=1000, + flux_pulse_duration=TQG_duration_ns, + init_duration=200000) + if 'live_plot_enabled' in kw.keys(): + _live_plot = kw['live_plot_enabled'] + else: + _live_plot = False + station.components['MC'].live_plot_enabled(_live_plot) + station.components['nested_MC'].live_plot_enabled(_live_plot) + # Setup for measurement + dircts = get_gate_directions(qH, qL) + Q_H = station.components[qH] + Q_L = station.components[qL] + flux_lm_H = Q_H.instr_LutMan_Flux.get_instr() + flux_lm_L = Q_L.instr_LutMan_Flux.get_instr() + flux_lm_H.set(f'vcz_amp_sq_{dircts[0]}', 1) + flux_lm_H.set(f'vcz_amp_fine_{dircts[0]}', 0.5) + flux_lm_H.set(f'vcz_amp_dac_at_11_02_{dircts[0]}', 0.5) + # For qubits off the sweet-spot, the amplitude should be negative + if qH in OFFSET_QUBITS: + flux_lm_H.set(f'vcz_amp_dac_at_11_02_{dircts[0]}', -0.5) + + # Assess if unipolar pulse is required + # if qH in OFFSET_QUBITS: + # # For qubits off the sweet-spot, the amplitude should be negative + # flux_lm_H.set(f'vcz_amp_dac_at_11_02_{dircts[0]}', -0.5) + # flux_lm_H.set(f'vcz_use_net_zero_pulse_{dircts[0]}', False) + # # # if working with asymmetric pulses + # if asymmetry_compensation: + # flux_lm_H.set(f'vcz_use_net_zero_pulse_{dircts[0]}', True) + # else: + # flux_lm_H.set(f'vcz_use_net_zero_pulse_{dircts[0]}', False) + # # Setting pading amplitude to ensure net-zero waveform + # make_unipolar_pulse_net_zero(flux_lm_H, f'cz_{dircts[0]}') + # Set frequency of low frequency qubit + + qL_det = flux_lm_L.get(f'q_freq_10_{dircts[1]}') # detuning at gate + if abs(qL_det) < 10e6: + sq_amp_L = 0 # avoids error near 0 in the flux arc. 
+ else: + sq_amp_L = get_DAC_amp_frequency(qL_det, flux_lm_L, + negative_amp=True if qL in OFFSET_QUBITS else False) + flux_lm_L.set(f'vcz_amp_sq_{dircts[1]}', 1) + flux_lm_L.set(f'vcz_amp_fine_{dircts[1]}', 0) + flux_lm_L.set(f'vcz_amp_dac_at_11_02_{dircts[1]}', sq_amp_L) + # Check waveform durations + check_flux_wf_duration(flux_lm_H) + check_flux_wf_duration(flux_lm_L) + # Set frequency of parked qubits + Parked_qubits = get_parking_qubits(qH, qL) + if apply_parking_settings: + park_freq = Q_L.freq_qubit()-qL_det-park_distance + for q in Parked_qubits: + Q_inst = station.components[q] + flux_lm_p = Q_inst.instr_LutMan_Flux.get_instr() + park_det = Q_inst.freq_qubit()-park_freq + # Only park if the qubit is closer than then 350 MHz + if park_det>10e6: + amp_park_pos = get_DAC_amp_frequency(park_det, flux_lm_p) + amp_park_neg = get_DAC_amp_frequency(park_det, flux_lm_p, + negative_amp=True) + _Amps = [amp_park_pos, amp_park_neg] + amp_park_idx = np.argmax(np.abs(_Amps)) + # Update parking amplitude in lookup table + flux_lm_p.park_amp(_Amps[amp_park_idx]) + else: + flux_lm_p.park_amp(0) + # Check wf duration of park qubits + check_flux_wf_duration(flux_lm_p) + # Estimating scan ranges based on frequency range + scan_range = 30e6 + _det = flux_lm_H.get(f'q_freq_10_{dircts[0]}') # detuning at gate + # Predict required range taking into account pulse asymmetry + if asymmetry_compensation: + # We use the sq_amp to calculate positive and negative amps for the pulse. + # (vcz_amp_dac_at_11_02 does not allow negative values). 
+ _amp = flux_lm_H.get(f'vcz_amp_dac_at_11_02_{dircts[0]}') + flux_lm_H.sq_amp(+_amp) + gain_high = get_Ch_amp_frequency(_det, flux_lm_H, DAC_param='sq_amp') + flux_lm_H.sq_amp(-_amp) + gain_low = get_Ch_amp_frequency(_det, flux_lm_H, DAC_param='sq_amp') + gain = (gain_high+gain_low)/2 + # Set new detunning corresponding to average gain + ch_amp_0 = get_Ch_amp_frequency(_det, flux_lm_H, DAC_param=f'sq_amp') + delta_ch_amp_p = get_Ch_amp_frequency(_det+scan_range/2, flux_lm_H, + DAC_param=f'sq_amp') - ch_amp_0 + delta_ch_amp_m = get_Ch_amp_frequency(_det-scan_range/2, flux_lm_H, + DAC_param=f'sq_amp') - ch_amp_0 + A_range = [gain+delta_ch_amp_m, gain+delta_ch_amp_p] + # Predict range without asymmetry + else: + A_range = [] + for r in [-scan_range/2, scan_range/2]: + _ch_amp = get_Ch_amp_frequency(_det+r, flux_lm_H, + DAC_param=f'vcz_amp_dac_at_11_02_{dircts[0]}') + A_range.append(_ch_amp) + # Perform measurement of 11_02 avoided crossing + device = station['device'] + device.ro_acq_weight_type('optimal') + device.ro_acq_averages(2**8) + device.prepare_for_timedomain(qubits=[qH, qL], bypass_flux=True) + device.prepare_fluxing(qubits=[qH, qL]+Parked_qubits) + device.measure_vcz_A_B_landscape( + Q0 = [qH], + Q1 = [qL], + B_amps = np.linspace(0, 1, 15), + A_ranges = [A_range], + A_points = 15, + Q_parks = Parked_qubits, + flux_codeword = flux_cw, + update_flux_params = False, + prepare_for_timedomain=False, + disable_metadata=True) + # Run frequency based analysis + a = ma2.tqg.VCZ_B_Analysis(Q0=[qH], Q1=[qL], + A_ranges=[A_range], + directions=[dircts], + Poly_coefs = [flux_lm_H.q_polycoeffs_freq_01_det()], + DAC_amp = flux_lm_H.get(f'vcz_amp_dac_at_11_02_{dircts[0]}'), + Out_range = flux_lm_H.cfg_awg_channel_range(), + Q0_freq = Q_H.freq_qubit(), + asymmetry = flux_lm_H.get(f'vcz_asymmetry_{dircts[0]}')\ + if asymmetry_compensation else 0, + tmid = flux_lm_H.get(f'vcz_time_middle_{dircts[0]}'), + label=f'VCZ_Amp_vs_B_{[qH]}_{[qL]}_{Parked_qubits}') + # Set optimal 
gate params + flux_lm_H.set(f'q_freq_10_{dircts[0]}', a.qoi[f'Optimal_det_{qH}']) + flux_lm_H.set(f'vcz_amp_fine_{dircts[0]}', a.qoi[f'Optimal_amps_{qH}'][1]) + return True + + +def Unipolar_wrapper(qH, qL, station, + park_distance: float = 700e6, + apply_parking_settings: bool = True, + **kw): + ''' + Wrapper function for measurement of of SNZ landscape. + Using voltage to detuning information, we set the + amplitude of the interaction based on previous updated + values of qubit detunings (q_freq_10_) from + Chevron measurement. + Args: + qH: High frequency qubit. + qL: Low frequency qubit. + park_distance: Minimum (frequency) distance of + parked qubits to low-frequency + ''' + # Set gate duration + TQG_duration_ns = TWOQ_GATE_DURATION_NS - 20 + file_cfg = gc.generate_config(in_filename=input_file, + out_filename=config_fn, + mw_pulse_duration=20, + ro_duration=1000, + flux_pulse_duration=TQG_duration_ns, + init_duration=200000) + if 'live_plot_enabled' in kw.keys(): + _live_plot = kw['live_plot_enabled'] + else: + _live_plot = False + station.components['MC'].live_plot_enabled(_live_plot) + station.components['nested_MC'].live_plot_enabled(_live_plot) + # Setup for measurement + dircts = get_gate_directions(qH, qL) + Q_H = station.components[qH] + Q_L = station.components[qL] + flux_lm_H = Q_H.instr_LutMan_Flux.get_instr() + flux_lm_L = Q_L.instr_LutMan_Flux.get_instr() + flux_lm_H.set('sq_amp', .5) + flux_lm_H.sq_delay(6e-9) + # Assess if unipolar pulse is required + if qH in OFFSET_QUBITS: + # For qubits off the sweet-spot, the amplitude should be negative + flux_lm_H.set('sq_amp', -0.5) + # Set frequency of low frequency qubit + qL_det = flux_lm_L.get(f'q_freq_10_{dircts[1]}') # detuning at gate + if abs(qL_det) < 10e6: + sq_amp_L = 0 # avoids error near 0 in the flux arc. 
+ else: + sq_amp_L = get_DAC_amp_frequency(qL_det, flux_lm_L) + flux_lm_L.set('sq_amp', sq_amp_L) + # Check waveform durations + check_flux_wf_duration(flux_lm_H) + check_flux_wf_duration(flux_lm_L) + # Set frequency of parked qubits + Parked_qubits = get_parking_qubits(qH, qL) + if apply_parking_settings: + park_freq = Q_L.freq_qubit()-qL_det-park_distance + for q in Parked_qubits: + Q_inst = station.components[q] + flux_lm_p = Q_inst.instr_LutMan_Flux.get_instr() + park_det = Q_inst.freq_qubit()-park_freq + # Only park if the qubit is closer than then 350 MHz + if park_det>20e6: + amp_park_pos = get_DAC_amp_frequency(park_det, flux_lm_p) + amp_park_neg = get_DAC_amp_frequency(park_det, flux_lm_p, + negative_amp=True) + _Amps = [amp_park_pos, amp_park_neg] + amp_park_idx = np.argmax(np.abs(_Amps)) + # Update parking amplitude in lookup table + flux_lm_p.park_amp(_Amps[amp_park_idx]) + else: + flux_lm_p.park_amp(0) + # Check wf duration of park qubits + check_flux_wf_duration(flux_lm_p) + # Estimating scan ranges based on frequency range + scan_range = 20e6 + _det = flux_lm_H.get(f'q_freq_10_{dircts[0]}') # detuning at gate + # Predict range + A_range = [] + for r in [-scan_range/2, scan_range/2]: + _ch_amp = get_Ch_amp_frequency(_det+r, flux_lm_H, + DAC_param='sq_amp') + A_range.append(_ch_amp) + # Perform measurement of 11_02 avoided crossing + device = station['device'] + device.ro_acq_weight_type('optimal') + device.ro_acq_averages(2**8) + device.prepare_for_timedomain(qubits=[qH, qL], bypass_flux=True) + device.prepare_fluxing(qubits=[qH, qL]+Parked_qubits) + device.measure_unipolar_A_t_landscape( + Q0 = [qH], + Q1 = [qL], + times = np.linspace(35e-9, 50e-9, 15), + A_ranges = [A_range], + A_points = 15, + Q_parks = Parked_qubits, + flux_codeword = 'sf_square', + update_flux_params = False, + prepare_for_timedomain=False, + disable_metadata=True) + # Run frequency based analysis + a = ma2.tqg.VCZ_B_Analysis(Q0=[qH], Q1=[qL], + A_ranges=[A_range], + 
directions=[dircts], + Poly_coefs = [flux_lm_H.q_polycoeffs_freq_01_det()], + DAC_amp = flux_lm_H.get('sq_amp'), + Out_range = flux_lm_H.cfg_awg_channel_range(), + Q0_freq = Q_H.freq_qubit(), + l1_coef = .5, + label=f'Unipolar_Amp_vs_t_{[qH]}_{[qL]}_{Parked_qubits}') + # Set optimal gate params + flux_lm_H.set(f'q_freq_10_{dircts[0]}', a.qoi[f'Optimal_det_{qH}']) + flux_lm_H.set('sq_length', a.qoi[f'Optimal_amps_{qH}'][1]) + return True + + +def Asymmetry_wrapper(qH, qL, station, flux_cw: str = 'cz'): + ''' + Wrapper function for fine-tuning SS using asymr of the SNZ pulse. + returns True. + ''' + # Set gate duration + TQG_duration_ns = TWOQ_GATE_DURATION_NS - 20 + file_cfg = gc.generate_config(in_filename=input_file, + out_filename=config_fn, + mw_pulse_duration=20, + ro_duration=1000, + flux_pulse_duration=TQG_duration_ns, + init_duration=200000) + # Setup for measurement + dircts = get_gate_directions(qH, qL) + Q_H = station.components[qH] + Q_L = station.components[qL] + mw_lutman_H = Q_H.instr_LutMan_MW.get_instr() + mw_lutman_L = Q_L.instr_LutMan_MW.get_instr() + flux_lm_H = Q_H.instr_LutMan_Flux.get_instr() + flux_lm_L = Q_L.instr_LutMan_Flux.get_instr() + # Set DAC amplitude for 2Q gate + det_qH = flux_lm_H.get(f'q_freq_10_{dircts[0]}') + det_qL = flux_lm_L.get(f'q_freq_10_{dircts[1]}') + amp_qH = get_DAC_amp_frequency(det_qH, flux_lm_H, + negative_amp=True if qH in OFFSET_QUBITS else False) + amp_qL = get_DAC_amp_frequency(det_qL, flux_lm_L, + negative_amp=True if qL in OFFSET_QUBITS else False) + # Compensate for asymmetry of cz pulse + _asymmetry = flux_lm_H.get(f'vcz_asymmetry_{dircts[0]}') + if abs(_asymmetry)> .075: + amp_qH = amp_qH/(1+flux_lm_H.get(f'vcz_asymmetry_{dircts[0]}')) + for i, det, amp, flux_lm in zip([ 0, 1], + [ det_qH, det_qL], + [ amp_qH, amp_qL], + [flux_lm_H, flux_lm_L]): + if abs(det) < 10e6: + flux_lm.set(f'vcz_amp_dac_at_11_02_{dircts[i]}', 0) + else: + flux_lm.set(f'vcz_amp_dac_at_11_02_{dircts[i]}', amp) + # Set preparation 
params + device = station['device'] + device.ro_acq_weight_type('optimal') + device.ro_acq_averages(2**11) # 2**10 + # Prepare readout + device.prepare_readout(qubits=[qH, qL]) + # Load flux waveforms + load_single_waveform_on_HDAWG(flux_lm_H, f'cz_{dircts[0]}') + load_single_waveform_on_HDAWG(flux_lm_L, f'cz_{dircts[1]}') + for mw1 in [mw_lutman_H, mw_lutman_L]: + mw1.load_phase_pulses_to_AWG_lookuptable() + flux_lm_H.set(f'vcz_use_asymmetric_amp_{dircts[0]}',True) + # Estimating asymmetry ranges based on frequency range + # if qH in ['X2', 'X3', 'X4']: + if qH in ['D4', 'D5', 'D6']: + _asym = flux_lm_H.get(f'vcz_asymmetry_{dircts[0]}') + asymmetries = np.linspace(-.5e-2, .5e-2, 7)+_asym + else: + _asym = flux_lm_H.get(f'vcz_asymmetry_{dircts[0]}') + asymmetries = np.linspace(-.25e-2, .25e-2, 7)+_asym + # scan_range = 10e6 + # _det = flux_lm_H.get(f'q_freq_10_{dircts[0]}') # detuning at gate + # # Get DAC amplitudes for each detuning + # sq_amp_0 = get_DAC_amp_frequency(_det, flux_lm_H) + # sq_amp_1 = get_DAC_amp_frequency(_det+scan_range/2, flux_lm_H) + # # Estimate asymmetry based required DAC amps + # asymetry_r = 1 - sq_amp_1/sq_amp_0 + # asymmetries = np.linspace(-asymetry_r, asymetry_r, 7) + # Measure + + device.calibrate_vcz_asymmetry( + Q0 = qH, + Q1 = qL, + prepare_for_timedomain=False, + Asymmetries = asymmetries, + Q_parks = get_parking_qubits(qH,qL), + update_params = True, + flux_codeword = flux_cw, + disable_metadata = True) + device.prepare_fluxing(qubits=[qH]) + return True + + +def Single_qubit_phase_calibration_wrapper(qH, qL, station, + park_distance=700e6, + apply_parking_settings: bool = True, + fine_cphase_calibration: bool = False, + qSpectator: list = None, + pc_repetitions = 1): + ''' + Wrapper function for fine-tunig CP 180 phase, SQ phase updates of 360, and verification. + Returns True if successful calibration otherwise + returns False. 
+ ''' + # Set gate duration + TQG_duration_ns = TWOQ_GATE_DURATION_NS - 20 + # file_cfg = gc.generate_config(in_filename=input_file, + # out_filename=config_fn, + # mw_pulse_duration=20, + # ro_duration=1000, + # flux_pulse_duration=TQG_duration_ns, + # init_duration=200000) + # Setup for measurement + dircts = get_gate_directions(qH, qL) + station.components['MC'].live_plot_enabled(False) + station.components['nested_MC'].live_plot_enabled(False) + Q_H = station.components[qH] + Q_L = station.components[qL] + mw_lutman_H = Q_H.instr_LutMan_MW.get_instr() + mw_lutman_L = Q_L.instr_LutMan_MW.get_instr() + flux_lm_H = Q_H.instr_LutMan_Flux.get_instr() + flux_lm_L = Q_L.instr_LutMan_Flux.get_instr() + # Set DAC amplitude for 2Q gate + det_qH = flux_lm_H.get(f'q_freq_10_{dircts[0]}') + det_qL = flux_lm_L.get(f'q_freq_10_{dircts[1]}') + amp_qH = get_DAC_amp_frequency(det_qH, flux_lm_H, + negative_amp=True if qH in OFFSET_QUBITS else False) + amp_qL = get_DAC_amp_frequency(det_qL, flux_lm_L, + negative_amp=True if qL in OFFSET_QUBITS else False) + # Compensate for asymmetry of cz pulse + _asymmetry = flux_lm_H.get(f'vcz_asymmetry_{dircts[0]}') + if abs(_asymmetry)> .075: + amp_qH = amp_qH/(1+flux_lm_H.get(f'vcz_asymmetry_{dircts[0]}')) + for i, det, amp, flux_lm in zip([ 0, 1], + [ det_qH, det_qL], + [ amp_qH, amp_qL], + [flux_lm_H, flux_lm_L]): + if abs(det) < 10e6: + flux_lm.set(f'vcz_amp_dac_at_11_02_{dircts[i]}', 0) + else: + flux_lm.set(f'vcz_amp_dac_at_11_02_{dircts[i]}', amp) + # Assess if unipolar pulse is required + # if qH in OFFSET_QUBITS: + # # Setting pading amplitude to ensure net-zero waveform + # make_unipolar_pulse_net_zero(flux_lm_H, f'cz_{dircts[0]}') + # Set frequency of parked qubits + qL_det = flux_lm_L.get(f'q_freq_10_{dircts[1]}') # detuning at gate + Parked_qubits = get_parking_qubits(qH, qL) + for q in Parked_qubits: + park_freq = Q_L.freq_qubit()-qL_det-park_distance + Q_inst = station.components[q] + flux_lm_p = 
Q_inst.instr_LutMan_Flux.get_instr() + park_det = Q_inst.freq_qubit()-park_freq + print('park_det', park_det) + if apply_parking_settings: + # Only park if the qubit is closer than then 350 MHz + if park_det>20e6: + # Choose sign of parking waveform (necessary for off-sweetspot qubits) + amp_park_pos = get_DAC_amp_frequency(park_det, flux_lm_p) + amp_park_neg = get_DAC_amp_frequency(park_det, flux_lm_p, + negative_amp=True) + _Amps = [amp_park_pos, amp_park_neg] + amp_park_idx = np.argmax(np.abs(_Amps)) + # Update parking amplitude in lookup table + flux_lm_p.park_amp(_Amps[amp_park_idx]) + print('park_amp', flux_lm_p.park_amp()) + else: + flux_lm_p.park_amp(0) + load_single_waveform_on_HDAWG(flux_lm_p, 'park') + # Set preparation params + device = station['device'] + flux_cw = 'cz' + device.ro_acq_weight_type('optimal') + device.ro_acq_averages(2**10) + # Prepare readout + if qSpectator is None: + qSpectator = [] + qubits_awaiting_prepare = [qH, qL] + qSpectator + if check_prepare_readout(qubits=qubits_awaiting_prepare, station=station): + device.prepare_readout(qubits=qubits_awaiting_prepare) + else: + # if preparation is not deemed necessary try just updating detectors + try: + # acq_ch_map = device._acq_ch_map + acq_ch_map = device._prep_ro_assign_weights(qubits=qubits_awaiting_prepare) + # device._prep_ro_integration_weights(qubits=qubits) + device._prep_ro_instantiate_detectors(qubits=qubits_awaiting_prepare, acq_ch_map=acq_ch_map) + except: + device.prepare_readout(qubits=qubits_awaiting_prepare) + # device.prepare_readout(qubits=qubits_awaiting_prepare) + # Load flux waveforms + load_single_waveform_on_HDAWG(flux_lm_H, f'cz_{dircts[0]}') + load_single_waveform_on_HDAWG(flux_lm_L, f'cz_{dircts[1]}') + # Check waveform durations + check_flux_wf_duration(flux_lm_H) + check_flux_wf_duration(flux_lm_L) + if apply_parking_settings: + for q in Parked_qubits: + flux_lm_p = Q_inst.instr_LutMan_Flux.get_instr() + check_flux_wf_duration(flux_lm_p) + + + # 
device.prepare_for_timedomain(qubits=[qH, qL]) + # Check if mw phase pulses are uploaded + + + # prepare_for_parity_check('X4', station, Data_qubits=['D8', 'D9']) + for q in [qH, qL]: + Q = station.components[q] + Q._prep_td_sources() + if check_prepare_mw(q, station): + Q.cfg_prepare_mw_awg(False) + Q._prep_mw_pulses() + Q.cfg_prepare_mw_awg(True) + mw_lm = Q.instr_LutMan_MW.get_instr() + mw_lm.set_default_lutmap() + mw_lm.load_phase_pulses_to_AWG_lookuptable() + # Calibrate conditional phase + if fine_cphase_calibration: + device.calibrate_parity_check_phase( + Q_ancilla = [qH], + Q_control = [qL], + Q_pair_target = [qH, qL], + # flux_cw_list = ['repetition_code_3', 'repetition_code_4'], + flux_cw_list = [flux_cw], + downsample_angle_points = 3, + prepare_for_timedomain = False, + update_mw_phase=False, + mw_phase_param = f'vcz_virtual_q_ph_corr_{dircts[0]}', + disable_metadata=True) + load_single_waveform_on_HDAWG(flux_lm_H, f'cz_{dircts[0]}') + # single-qubit phase of high frequency qubit + device.measure_parity_check_ramsey( + Q_target = [qH], + Q_control = [qL], + Q_spectator=qSpectator, + # flux_cw_list = ['repetition_code_3', 'repetition_code_4'], + flux_cw_list = [flux_cw], + prepare_for_timedomain = False, + downsample_angle_points = 3, + update_mw_phase=True, + mw_phase_param=f'vcz_virtual_q_ph_corr_{dircts[0]}', + disable_metadata=True, + pc_repetitions=pc_repetitions) + # Calibrate low frequency qubit phase + device.measure_parity_check_ramsey( + Q_target = [qL], + Q_control = [qH], + Q_spectator=qSpectator, + # flux_cw_list = ['repetition_code_3', 'repetition_code_4'], + flux_cw_list = [flux_cw], + prepare_for_timedomain = False, + downsample_angle_points = 3, + update_mw_phase=True, + mw_phase_param=f'vcz_virtual_q_ph_corr_{dircts[1]}', + disable_metadata=True, + pc_repetitions=pc_repetitions) + + mw_lutman_H.upload_single_qubit_phase_corrections() + mw_lutman_L.upload_single_qubit_phase_corrections() + return True + + +def 
TwoQ_Randomized_benchmarking_wrapper(qH, qL, station, **kw): + ''' + Wrapper function around Randomized benchmarking. + Returns True if successful calibration otherwise + returns False. + ''' + # Set gate duration + TQG_duration_ns = TWOQ_GATE_DURATION_NS - 20 + # Buffer time after gate + if 'buffer_time_ns' in kw.keys(): + buffer_time_ns = kw['buffer_time_ns'] + else: + buffer_time_ns = 0 + TQG_duration_ns += buffer_time_ns + file_cfg = gc.generate_config(in_filename=input_file, + out_filename=config_fn, + mw_pulse_duration=20, + ro_duration=800, + flux_pulse_duration=TQG_duration_ns, + init_duration=200000) + # Setup for measurement + station.components['MC'].live_plot_enabled(False) + station.components['nested_MC'].live_plot_enabled(False) + dircts = get_gate_directions(qH, qL) + Q_H = station.components[qH] + Q_L = station.components[qL] + mw_lutman_H = Q_H.instr_LutMan_MW.get_instr() + mw_lutman_L = Q_L.instr_LutMan_MW.get_instr() + flux_lm_H = Q_H.instr_LutMan_Flux.get_instr() + flux_lm_L = Q_L.instr_LutMan_Flux.get_instr() + # Set DAC amplitude for 2Q gate + det_qH = flux_lm_H.get(f'q_freq_10_{dircts[0]}') + det_qL = flux_lm_L.get(f'q_freq_10_{dircts[1]}') + amp_qH = get_DAC_amp_frequency(det_qH, flux_lm_H, + negative_amp=True if qH in OFFSET_QUBITS else False) + amp_qL = get_DAC_amp_frequency(det_qL, flux_lm_L, + negative_amp=True if qL in OFFSET_QUBITS else False) + # Compensate for asymmetry of cz pulse + _asymmetry = flux_lm_H.get(f'vcz_asymmetry_{dircts[0]}') + if abs(_asymmetry)> .075: + amp_qH = amp_qH/(1+flux_lm_H.get(f'vcz_asymmetry_{dircts[0]}')) + for i, det, amp, flux_lm in zip([ 0, 1], + [ det_qH, det_qL], + [ amp_qH, amp_qL], + [flux_lm_H, flux_lm_L]): + if abs(det) < 10e6: + flux_lm.set(f'vcz_amp_dac_at_11_02_{dircts[i]}', 0) + else: + flux_lm.set(f'vcz_amp_dac_at_11_02_{dircts[i]}', amp) + # Check waveform duration + check_flux_wf_duration(flux_lm_L) + # Prepare device + device = station['device'] + flux_cw = 'cz' + 
device.ro_acq_weight_type('optimal IQ') + # device.ro_acq_averages(2**8) + device.ro_acq_averages(2**10) + device.ro_acq_digitized(False) + # Set preparation params + mw_lutman_H.set_default_lutmap() + mw_lutman_L.set_default_lutmap() + # Check waveform durations + check_flux_wf_duration(flux_lm_H) + check_flux_wf_duration(flux_lm_L) + device.prepare_for_timedomain(qubits=[qH, qL], bypass_flux=True) + # Load flux waveforms + load_single_waveform_on_HDAWG(flux_lm_H, f'cz_{dircts[0]}') + load_single_waveform_on_HDAWG(flux_lm_L, f'cz_{dircts[1]}') + # Assess recompilation + if 'recompile' in kw.keys(): + _recompile = kw['recompile'] + else: + _recompile = False + # measurement + device.measure_two_qubit_interleaved_randomized_benchmarking( + qubits = [qH, qL], + nr_seeds = 20, + measure_idle_flux = False, + prepare_for_timedomain = False, + recompile = _recompile, + nr_cliffords = np.array([0, 1., 3., 5., 7., 9., 11., 15., + 20., 30., 50.]), + flux_codeword = flux_cw) + return True + + +def TLS_density_wrapper(Qubit, station, + detuning = None, + max_duration = 120e-9, + use_second_excited_state: bool = False): + ''' + Wrapper function for measurement of TLS density. + Using a dynamical square pulse to flux the qubit + away while parking park_qubits. + Args: + Qubit: fluxed qubit. + park_qubits: list of parked qubits. 
+ ''' + Qubit_parks = { + 'D1': [], + 'D2': [], + 'D3': [], + 'D4': ['Z1', 'Z3', 'X3'], + 'D5': ['Z1', 'Z4', 'X2', 'X3'], + 'D6': ['Z2', 'Z4', 'X2'], + 'D7': [], + 'D8': [], + 'D9': [], + 'Z1': ['D1', 'D2'], + 'Z2': ['D3'], + 'Z3': ['D7'], + 'Z4': ['D8', 'D9'], + 'X1': ['D1', 'D2'], + 'X2': ['D2', 'D3'], + 'X3': ['D7', 'D8'], + 'X4': ['D8', 'D9'], + } + # Set gate duration + if max_duration>TWOQ_GATE_DURATION: + delta = int(np.round((max_duration-TWOQ_GATE_DURATION)*1e9/20)*20) + else: + delta = 0 + TQG_duration_ns = TWOQ_GATE_DURATION_NS - 20 + file_cfg = gc.generate_config(in_filename=input_file, + out_filename=config_fn, + mw_pulse_duration=20, + ro_duration=1000, + flux_pulse_duration=TQG_duration_ns+delta+20, + init_duration=200000) + # Setup for measurement + station.components['MC'].live_plot_enabled(False) + station.components['nested_MC'].live_plot_enabled(False) + device = station.components['device'] + Flux_lm_q = station.components[Qubit].instr_LutMan_Flux.get_instr() + # Determine minimum detuning + p_coefs = Flux_lm_q.q_polycoeffs_freq_01_det() + freq_func = np.poly1d(p_coefs) + amp_0 = -p_coefs[1]/(2*p_coefs[0]) + det_0 = freq_func(amp_0) + # det_0 = Flux_lm_q.q_polycoeffs_freq_01_det()[-1] + if detuning is None: + detuning = np.arange(det_0, 1500e6, 5e6) + # Convert detuning to list of amplitudes + Flux_lm_q.sq_amp(0.5) + Amps = [ get_Ch_amp_frequency(det, Flux_lm_q, DAC_param='sq_amp')\ + for det in detuning ] + # Check parking qubits if needed and set the right parking distance. + Parked_qubits = Qubit_parks[Qubit] + # set parking amps for parked qubits. 
+ if not Parked_qubits: + print('no parking qubits are defined') + else: + # Handle frequency of parked qubits + for q_park in Parked_qubits: + Q_park = station.components[q_park] + # minimum allowed detuning + minimum_detuning = 600e6 + f_q = station.components[Qubit].freq_qubit() + f_q_min = f_q-detuning[-1] + # required parked qubit frequency + f_q_park = f_q_min-minimum_detuning + det_q_park = Q_park.freq_qubit() - f_q_park + fl_lm_park = Q_park.instr_LutMan_Flux.get_instr() + if det_q_park > 10e6: + park_amp = get_DAC_amp_frequency(det_q_park, fl_lm_park) + else: + park_amp = 0 + fl_lm_park.sq_amp(park_amp) + fl_lm_park.sq_length(max_duration) + if max_duration > TWOQ_GATE_DURATION: + fl_lm_park.cfg_max_wf_length(max_duration) + fl_lm_park.AWG.get_instr().reset_waveforms_zeros() + # prepare for timedomains + if max_duration > TWOQ_GATE_DURATION: + Flux_lm_q.cfg_max_wf_length(max_duration) + Flux_lm_q.AWG.get_instr().reset_waveforms_zeros() + device.ro_acq_weight_type('optimal') + device.ro_acq_averages(2**8) + device.ro_acq_digitized(True) + device.prepare_for_timedomain(qubits=[Qubit, 'D1'], bypass_flux=True) + device.prepare_fluxing(qubits=[Qubit, 'D1']+Parked_qubits) + # device.prepare_readout(qubits=[Qubit, 'D1']) + # device.ro_acq_digitized(False) + if not Parked_qubits: + Parked_qubits = None + device.measure_chevron( + q0=Qubit, + q_spec='D1', + amps=Amps, + q_parks=Parked_qubits, + lengths= np.linspace(10e-9, max_duration, 12), + target_qubit_sequence='ground', + waveform_name="square", + buffer_time=40e-9, + prepare_for_timedomain=False, + disable_metadata=True, + second_excited_state=use_second_excited_state, + ) + # Reset waveform durations + if max_duration > TWOQ_GATE_DURATION: + Flux_lm_q.cfg_max_wf_length(TWOQ_GATE_DURATION) + Flux_lm_q.AWG.get_instr().reset_waveforms_zeros() + if not Parked_qubits: + print('no parking qubits are defined') + else: + for q_park in Parked_qubits: + fl_lm_park = Q_park.instr_LutMan_Flux.get_instr() + 
fl_lm_park.cfg_max_wf_length(TWOQ_GATE_DURATION) + fl_lm_park.AWG.get_instr().reset_waveforms_zeros() + # Run landscape analysis + interaction_freqs = { + d : Flux_lm_q.get(f'q_freq_10_{d}')\ + for d in ['NW', 'NE', 'SW', 'SE']\ + if 2e9 > Flux_lm_q.get(f'q_freq_10_{d}') > 10e6 + } + a = ma2.tqg.TLS_landscape_Analysis( + Q_freq = station.components[Qubit].freq_qubit(), + Out_range=Flux_lm_q.cfg_awg_channel_range(), + DAC_amp=Flux_lm_q.sq_amp(), + Poly_coefs=Flux_lm_q.q_polycoeffs_freq_01_det(), + interaction_freqs=interaction_freqs) + return True + + +def Parking_experiment_wrapper(qH, qL, qP, station, relative_to_qH: bool = True, park_stepsize: float = 5e6): + ''' + Wrapper function for fine-tunig CP 180 phase, SQ phase updates of 360, and verification. + Returns True if successful calibration otherwise + returns False. + ''' + # Set gate duration + TQG_duration_ns = TWOQ_GATE_DURATION_NS - 20 + file_cfg = gc.generate_config(in_filename=input_file, + out_filename=config_fn, + mw_pulse_duration=20, + ro_duration=1000, + flux_pulse_duration=TQG_duration_ns, + init_duration=200000) + # Setup for measurement + dircts = get_gate_directions(qH, qL) + station.components['MC'].live_plot_enabled(False) + station.components['nested_MC'].live_plot_enabled(False) + Q_H = station.components[qH] + Q_L = station.components[qL] + Q_P = station.components[qP] + mw_lutman_H = Q_H.instr_LutMan_MW.get_instr() + mw_lutman_L = Q_L.instr_LutMan_MW.get_instr() + flux_lm_H = Q_H.instr_LutMan_Flux.get_instr() + flux_lm_L = Q_L.instr_LutMan_Flux.get_instr() + # Set preparation params + device = station['device'] + device.ro_acq_weight_type('optimal') + device.ro_acq_averages(2**9) + # # Prepare readout + if check_prepare_readout(qubits=[qH, qL, qP], station=station): + device.prepare_readout(qubits=[qH, qL, qP]) + else: + # if preparation is not deemed necessary try just updating detectors + try: + acq_ch_map = device._acq_ch_map + device._prep_ro_instantiate_detectors(qubits=[qH, qL, qP], 
acq_ch_map=acq_ch_map) + except: + device.prepare_readout(qubits=[qH, qL, qP]) + # device.prepare_readout(qubits=[qH, qL, qP]) + # Prepare MW pulses + for q in [qH, qL, qP]: + Q = station.components[q] + Q._prep_td_sources() + if check_prepare_mw(q, station): + Q.cfg_prepare_mw_awg(False) + Q._prep_mw_pulses() + Q.cfg_prepare_mw_awg(True) + mw_lm = Q.instr_LutMan_MW.get_instr() + mw_lm.set_default_lutmap() + mw_lm.load_phase_pulses_to_AWG_lookuptable() + # Set DAC amplitude for 2Q gate + det_qH = flux_lm_H.get(f'q_freq_10_{dircts[0]}') + det_qL = flux_lm_L.get(f'q_freq_10_{dircts[1]}') + amp_qH = get_DAC_amp_frequency(det_qH, flux_lm_H) + amp_qL = get_DAC_amp_frequency(det_qL, flux_lm_L) + for i, det, amp, flux_lm in zip([ 0, 1], + [ det_qH, det_qL], + [ amp_qH, amp_qL], + [flux_lm_H, flux_lm_L]): + if det < 20e6: + flux_lm.set(f'vcz_amp_dac_at_11_02_{dircts[i]}', 0) + else: + flux_lm.set(f'vcz_amp_dac_at_11_02_{dircts[i]}', amp) + # Load flux waveforms + load_single_waveform_on_HDAWG(flux_lm_H, f'cz_{dircts[0]}') + load_single_waveform_on_HDAWG(flux_lm_L, f'cz_{dircts[1]}') + # Park remainder neighboring qubits away (at least 1GHz from qH) + Qspectators = get_parking_qubits(qH, qL) + for q in Qspectators: + if q != qP: + Q = station.components[q] + flux_lm = Q.instr_LutMan_Flux.get_instr() + park_dist = 1000e6 + park_freq = Q_H.freq_qubit()-det_qH-park_dist + park_det = Q.freq_qubit()-park_freq + amp_park = get_DAC_amp_frequency(park_det, flux_lm) + flux_lm.set('park_amp', amp_park) + load_single_waveform_on_HDAWG(flux_lm, 'park') + # Select parking distances + # (we start the sweep at 20 MHz qP detuning) + park_det_init = 20e6 + park_dist_init = park_det_init + if relative_to_qH: + park_freq = Q_P.freq_qubit() - park_det_init + park_dist_init = Q_H.freq_qubit()-det_qH-park_freq + Park_distances = np.arange(park_dist_init, 1000e6, park_stepsize) + # Measure + # device.calibrate_park_frequency( + # qH=qH, qL=qL, qP=qP, + # Park_distances = Park_distances, + # 
prepare_for_timedomain=False, + # disable_metadata=False) + return True + + +def Calibrate_CZ_gate(qH, qL, station, + qL_det: float = 0, + park_distance: float = 700e6, + apply_parking_settings: bool = True, + asymmetry_compensation: bool = False, + calibrate_asymmetry: bool = True, + benchmark: bool = True, + tmid_offset_samples: int = 0, + calibration_type: str = 'full', + **kw): + ''' + Calibrates and benchmarks two-qubit gate. + Calibration types: + Full : Performs full cz calibration. + Express: Only single qubit-phase calibration. + Fine: Fine cphase calibration using Bamp sweep. + + ''' + # Parse calibration steps + calibration_steps = [] + if 'full' in calibration_type.lower(): + calibration_steps.append('Chevron') + calibration_steps.append('Tmid') + calibration_steps.append('AB') + if 'no chevron' in calibration_type.lower(): + if 'Chevron' in calibration_steps: + calibration_steps.remove('Chevron') + else: + if 'tmid' in calibration_type.lower(): + calibration_steps.append('Tmid') + if 'ab' in calibration_type.lower(): + calibration_steps.append('AB') + + # Calibrate for asymmetryc cz pulse? 
+ use_negative_amp=False + if asymmetry_compensation: + use_negative_amp=True + if 'fine' in calibration_type.lower(): + fine_calibration = True + else: + fine_calibration = False + # Perform calibration steps + if 'Chevron' in calibration_steps: + # Measure interaction Chevron + Chevron_wrapper(qH=qH, qL=qL, station=station, + qL_det=qL_det, + park_distance=park_distance, + negative_amp=use_negative_amp, + **kw) + if 'Tmid' in calibration_steps: + # SNZ A vs Tmid landscape + SNZ_tmid_wrapper(qH=qH, qL=qL, station=station, + apply_parking_settings=apply_parking_settings, + park_distance=park_distance, + asymmetry_compensation=asymmetry_compensation, + tmid_offset_samples=tmid_offset_samples, + **kw) + if 'AB' in calibration_steps: + # SNZ A vs B landscape + SNZ_AB_wrapper(qH=qH, qL=qL, station=station, + apply_parking_settings=apply_parking_settings, + park_distance=park_distance, + asymmetry_compensation=asymmetry_compensation, + **kw) + # Pulse asymmetry calibration + if calibrate_asymmetry: + Asymmetry_wrapper(qH=qH, qL=qL, station=station) + # Single qubit phase calibration + Single_qubit_phase_calibration_wrapper( + qH=qH, qL=qL, station=station, + apply_parking_settings=apply_parking_settings, + park_distance=park_distance, + fine_cphase_calibration=fine_calibration) + # Interleaved randomized benchmarking + if benchmark: + TwoQ_Randomized_benchmarking_wrapper(qH=qH, qL=qL, station=station, **kw) + + +############################################################################### +# Parity check calibration graph +############################################################################### +import os +import pycqed as pq +from pycqed.measurement.openql_experiments import generate_CC_cfg as gc +input_file = os.path.join(pq.__path__[0], 'measurement', + 'openql_experiments', 'config_cc_s17_direct_iq.json.in') +config_fn = os.path.join(pq.__path__[0], 'measurement', + 'openql_experiments', 'output_cc_s17','config_cc_s17_direct_iq.json') + + +class 
Parity_check_calibration(AutoDepGraph_DAG): + def __init__(self, + name: str, + station, + stabilizers: list = None, + **kwargs): + super().__init__(name, **kwargs) + if stabilizers == None: + stabilizers = [ + 'Z1', + 'Z2', + 'Z3', + 'Z4', + # 'X1', + # 'X2', + # 'X3', + # 'X4', + ] + self.station = station + self.create_dep_graph(stabilizers=stabilizers) + + def create_dep_graph(self, stabilizers:list): + ''' + Dependency graph for the calibration of + single-qubit gates. + ''' + print(f'Creating dependency graph for Parity check calibration') + ############################## + # Grah nodes + ############################## + module_name = 'pycqed.instrument_drivers.meta_instrument.Surface17_dependency_graph' + + + for stab in stabilizers: + + self.add_node(f'{stab} Horizontal calibration', + calibrate_function=module_name+'.Horizontal_calibration_wrapper', + calibrate_function_args={ + 'stabilizer_qubit': stab, + 'station': self.station + }) + + self.add_node(f'{stab} Ancilla phase verification', + calibrate_function=module_name+'.Measure_parity_check_phase_wrapper', + calibrate_function_args={ + 'stabilizer_qubit': stab, + 'station': self.station + }) + + self.add_node(f'{stab} Data-qubit phase correction', + calibrate_function=module_name+'.Data_qubit_phase_calibration_wrapper', + calibrate_function_args={ + 'stabilizer_qubit': stab, + 'station': self.station + }) + + self.add_node(f'{stab} Parity assignment fidelity', + calibrate_function=module_name+'.Parity_check_fidelity_wrapper', + calibrate_function_args={ + 'stabilizer_qubit': stab, + 'station': self.station + }) + + self.add_node(f'{stab} Parity repeatability', + calibrate_function=module_name+'.Parity_check_repeatability_wrapper', + calibrate_function_args={ + 'stabilizer_qubit': stab, + 'station': self.station + }) + + # self.add_node('Spectator_data_qubits', + # calibrate_function=module_name+'.Spectator_data_qubits_wrapper', + # calibrate_function_args={ + # 'stabilizer': stab, + # 'station': 
self.station + # }) + + # self.add_node('DIO_calibration', + # calibrate_function=module_name+'.DIO_calibration', + # calibrate_function_args={ + # 'stabilizer': stab, + # 'station': self.station + # }) + + # Save snpashot + self.add_node('Save snapshot', + calibrate_function=module_name+'.save_snapshot_metadata', + calibrate_function_args={ + 'station': self.station, + }) + + ############################## + # Node depdendencies + ############################## + for stab in stabilizers: + self.add_edge('Save snapshot', + f'{stab} Parity repeatability') + + self.add_edge(f'{stab} Parity repeatability', + f'{stab} Parity assignment fidelity') + + self.add_edge(f'{stab} Parity assignment fidelity', + f'{stab} Data-qubit phase correction') + + self.add_edge(f'{stab} Data-qubit phase correction', + f'{stab} Ancilla phase verification') + + self.add_edge(f'{stab} Ancilla phase verification', + f'{stab} Horizontal calibration') + + ############################## + # Create graph + ############################## + self.cfg_plot_mode = 'svg' + self.update_monitor() + self.cfg_svg_filename + url = self.open_html_viewer() + print('Dependency graph created at ' + url) + + +def Prepare_for_parity_check_wrapper(station): + ''' + Wrapper function to prepare for timedomain of all parity checks of + a stabilizer. 
+ ''' + # Prepare for timedomain of parity check + for q in ['Z1', 'Z2', 'Z3', 'Z4', + 'X1', 'X2', 'X3', 'X4']: + prepare_for_parity_check(q, station) + return True + + +def Horizontal_calibration_wrapper(stabilizer_qubit, station, + Q_control: list = None, + flux_cw_list = None, + mw_phase_param = None): + ''' + Wrapper function to calibrate parity check CZ phases + returns True + ''' + station.components['MC'].live_plot_enabled(False) + station.components['nested_MC'].live_plot_enabled(False) + # Prepare for timedomain of parity check + device = station.components['device'] + device.ro_acq_averages(2**8) + device.ro_acq_digitized(False) + prepare_for_parity_check(stabilizer_qubit, station, + Data_qubits = Q_control) + Q_ancilla = [stabilizer_qubit] + if not Q_control: + Q_control = list(get_nearest_neighbors(stabilizer_qubit).keys()) + # Parity check settings + if flux_cw_list == None: + if 'X' in stabilizer_qubit: + flux_cw_list = [f'flux_dance_{i}' for i in [1, 2, 3, 4]] + else: + flux_cw_list = [f'flux_dance_{i}' for i in [5, 6, 7, 8]] + flux_cw_list = ['cz' for q in Q_control] + if mw_phase_param == None: + # if 'X' in stabilizer_qubit: + # mw_phase_param = 'vcz_virtual_q_ph_corr_step_4' + # else: + # mw_phase_param = 'vcz_virtual_q_ph_corr_step_8' + dircts = get_gate_directions(q0=stabilizer_qubit, q1=Q_control[0]) + mw_phase_param = f'vcz_virtual_q_ph_corr_{dircts[0]}' + # Calibrate CZ with each data qubit + for q in Q_control: + # If high frequency qubit + if q in ['D4','D5','D6']: + # Order of qubits requires high freq. 
qubit first + Q_pair_target = [q, stabilizer_qubit] + else: + Q_pair_target = [stabilizer_qubit, q] + device.calibrate_parity_check_phase( + Q_ancilla = Q_ancilla, + Q_control = Q_control, + Q_pair_target = Q_pair_target, + flux_cw_list = flux_cw_list, + downsample_angle_points = 3, + mw_phase_param = mw_phase_param, + update_flux_param=True, + update_mw_phase=True, + prepare_for_timedomain=False, + disable_metadata = True) + # upload new waveform + Q_H = station.components[Q_pair_target[0]] + fl_lm_q = Q_H.instr_LutMan_Flux.get_instr() + dircts = get_gate_directions(*Q_pair_target) + load_single_waveform_on_HDAWG(fl_lm_q, f'cz_{dircts[0]}') + # upload phase corrections + Q_S = station.components[stabilizer_qubit] + mw_lm_q = Q_S.instr_LutMan_MW.get_instr() + mw_lm_q.upload_single_qubit_phase_corrections() + return True + + +def Measure_parity_check_phase_wrapper(stabilizer_qubit, station, + Q_control: list = None, + flux_cw_list = None, + mw_phase_param = None, + pc_repetitions: int = 1, + ): + ''' + Wrapper function to measure pairty checks + returns True + ''' + station.components['MC'].live_plot_enabled(False) + station.components['nested_MC'].live_plot_enabled(False) + # Prepare for timedomain of parity check + device = station.components['device'] + device.ro_acq_averages(2**8) + device.ro_acq_digitized(False) + prepare_for_parity_check(stabilizer_qubit, station, + Data_qubits = Q_control) + # Parity check settings + Q_ancilla = [stabilizer_qubit] + if not Q_control: + Q_control = list(get_nearest_neighbors(stabilizer_qubit).keys()) + if flux_cw_list == None: + if 'X' in stabilizer_qubit: + flux_cw_list = [f'flux_dance_{i}' for i in [1, 2, 3, 4]] + else: + flux_cw_list = [f'flux_dance_{i}' for i in [5, 6, 7, 8]] + flux_cw_list = ['cz' for q in Q_control] + if mw_phase_param == None: + # if 'X' in stabilizer_qubit: + # mw_phase_param = 'vcz_virtual_q_ph_corr_step_4' + # else: + # mw_phase_param = 'vcz_virtual_q_ph_corr_step_8' + dircts = get_gate_directions(q0 
= stabilizer_qubit, q1 = Q_control[0]) + mw_phase_param = f'vcz_virtual_q_ph_corr_{dircts[0]}' + # Measure + qoi = device.measure_parity_check_ramsey( + Q_target = Q_ancilla, + Q_control = Q_control, + flux_cw_list = flux_cw_list, + mw_phase_param=mw_phase_param, + pc_repetitions=pc_repetitions, + update_mw_phase=True, + prepare_for_timedomain = False, + disable_metadata=True) + # upload phase corrections + Q_S = station.components[stabilizer_qubit] + mw_lm_q = Q_S.instr_LutMan_MW.get_instr() + mw_lm_q.upload_single_qubit_phase_corrections() + return qoi + + +def Data_qubit_phase_calibration_wrapper(stabilizer_qubit, station, + Q_data: list = None, + flux_cw_list = None, + mw_phase_param = None, + pc_repetitions: int = 1, + ): + ''' + Wrapper function to calibrate single qubit phases of data-qubits in pairty checks + returns True + ''' + station.components['MC'].live_plot_enabled(False) + station.components['nested_MC'].live_plot_enabled(False) + # Prepare for timedomain of parity check + device = station.components['device'] + device.ro_acq_averages(2**8) + device.ro_acq_digitized(False) + prepare_for_parity_check(stabilizer_qubit, station, + Data_qubits = Q_data) + # Parity check settings + Q_ancilla = [stabilizer_qubit] + if not Q_data: + Q_data = list(get_nearest_neighbors(stabilizer_qubit).keys()) + if flux_cw_list == None: + if 'X' in stabilizer_qubit: + flux_cw_list = [f'flux_dance_{i}' for i in [1, 2, 3, 4]] + else: + flux_cw_list = [f'flux_dance_{i}' for i in [5, 6, 7, 8]] + flux_cw_list = ['cz' for q in Q_data] + if mw_phase_param == None: + # if 'X' in stabilizer_qubit: + # mw_phase_param = 'vcz_virtual_q_ph_corr_step_4' + # else: + # mw_phase_param = 'vcz_virtual_q_ph_corr_step_8' + mw_phase_param = [] + for q in Q_data: + dircts = get_gate_directions(q0 = q, q1 = stabilizer_qubit) + mw_phase_param.append(f'vcz_virtual_q_ph_corr_{dircts[0]}') + # Measure + qoi = device.measure_parity_check_ramsey( + Q_target = Q_data, + Q_control = Q_ancilla, + 
flux_cw_list = flux_cw_list, + downsample_angle_points = 1, + update_mw_phase=True, + mw_phase_param=mw_phase_param, + pc_repetitions=pc_repetitions, + prepare_for_timedomain = False, + disable_metadata=True) + # upload phase corrections + for q in Q_data: + Q_c = station.components[q] + mw_lm_q = Q_c.instr_LutMan_MW.get_instr() + mw_lm_q.upload_single_qubit_phase_corrections() + return qoi + + +def Parity_check_fidelity_wrapper(stabilizer_qubit, station, + Q_data: list = None, + heralded_init = False, + flux_cw_list = None): + ''' + Wrapper function to measure pairty checks + returns True + ''' + station.components['MC'].live_plot_enabled(False) + station.components['nested_MC'].live_plot_enabled(False) + # Prepare for timedomain of parity check + device = station.components['device'] + device.ro_acq_averages(2**10) + device.ro_acq_digitized(False) + device.ro_acq_integration_length(500e-9) + prepare_for_parity_check(stabilizer_qubit, station, Data_qubits = Q_data) + # Parity check settings + Q_ancilla = [stabilizer_qubit] + if not Q_data: + Q_data = list(get_nearest_neighbors(stabilizer_qubit).keys()) + if flux_cw_list == None: + if 'X' in stabilizer_qubit: + flux_cw_list = [f'flux_dance_{i}' for i in [1, 2, 3, 4]] + else: + flux_cw_list = [f'flux_dance_{i}' for i in [5, 6, 7, 8]] + flux_cw_list = ['cz' for q in Q_data] + if not heralded_init: + device.prepare_readout(qubits=Q_ancilla) + # Measure + device.measure_parity_check_fidelity( + Q_ancilla = Q_ancilla, + Q_control = Q_data, + flux_cw_list = flux_cw_list, + initialization_msmt=heralded_init, + prepare_for_timedomain = False, + disable_metadata=True) + return True + + +def Parity_check_repeatability_wrapper(stabilizer_qubit, station, + Q_data: list = None, + sim_measurement = True, + readout_duration_ns = 420, + repetitions = 5, + heralded_init = False, + flux_cw_list = None, + n_rounds = None,): + ''' + Wrapper function to measure pairty check repeatability + n_rounds : list + returns True + ''' + # Set 
Parity check duration + station.components['MC'].live_plot_enabled(False) + station.components['nested_MC'].live_plot_enabled(False) + # Prepare for timedomain of parity check + device = station.components['device'] + device.ro_acq_averages(2**10) + device.ro_acq_digitized(False) + device.ro_acq_integration_length(500e-9) + prepare_for_parity_check(stabilizer_qubit, station, + Data_qubits = Q_data) + if not Q_data: + Q_data = list(get_nearest_neighbors(stabilizer_qubit).keys()) + if flux_cw_list == None: + if 'X' in stabilizer_qubit: + flux_cw_list = [f'flux_dance_{i}' for i in [1, 2, 3, 4]] + else: + flux_cw_list = [f'flux_dance_{i}' for i in [5, 6, 7, 8]] + flux_cw_list = ['cz' for q in Q_data] + # can't avoid preparaing for timedomain here as it orders the qubits + if n_rounds == None: + # n_rounds = [1, 2] + n_rounds = [2] + # Measure + for n in n_rounds: + device.measure_weight_n_parity_tomography( + ancilla_qubit = stabilizer_qubit, + data_qubits = Q_data, + flux_cw_list = flux_cw_list, + sim_measurement=sim_measurement, + readout_duration_ns = readout_duration_ns, + n_rounds = n, + repetitions = 3, + prepare_for_timedomain = True, + disable_metadata=True) + return True + + +def Surface_13_wrapper(station, log_zero = None, + measurement_time_ns: int = 500, + prepare_LRU_pulses: bool = True): + ''' + Wrapper routine to measure the surface-13 experiment. + ''' + ####################################################### + # Preparation + ####################################################### + assert (measurement_time_ns-20)%40 == 0, 'Not a valid measurement time!' 
+ # Set configuration + TQG_duration_ns = TWOQ_GATE_DURATION_NS - 20 + file_cfg = gc.generate_config(in_filename=input_file, + out_filename=config_fn, + mw_pulse_duration=20, + ro_duration=measurement_time_ns, + flux_pulse_duration=TQG_duration_ns, + init_duration=200000) + station.components['MC'].live_plot_enabled(False) + station.components['nested_MC'].live_plot_enabled(False) + device = station.components['device'] + integration_time = measurement_time_ns-20 + device.ro_acq_integration_length(integration_time*1e-9) + # Get qubits directly involved in parity check + Data_qubits = ['D1', 'D2', 'D3', + 'D4', 'D5', 'D6', + 'D7', 'D8', 'D9'] + Ancilla_qubits = ['Z1', 'Z2', 'Z3', 'Z4'] + All_qubits = Data_qubits + Ancilla_qubits + # Prepare qubits + import time + # prepare readout + # t_ro = time.time() + # # prepapare readout detectors + # device.ro_acq_weight_type('custom') + # # ordered_ro_list = ['Z3', 'D4', 'D5', 'D1', 'D2', 'D3', 'D7', 'Z1', 'D6', + # # 'D8', 'D9', 'Z2', 'Z4'] + # ordered_ro_list = ['Z3', 'D4', 'D5', 'X2', 'D1', 'X1', 'D2', 'D3', 'D7', + # 'X3', 'Z1', 'D6', 'D8', 'D9', 'Z2', 'Z4'] + # ordered_ro_dict = {q: 'optimal IQ' for q in ordered_ro_list} + # acq_ch_map = device._prep_ro_assign_weights(qubits=ordered_ro_list, + # qubit_int_weight_type_dict = ordered_ro_dict) + # device._prep_ro_integration_weights(qubits=ordered_ro_list, + # qubit_int_weight_type_dict = ordered_ro_dict) + # device._prep_ro_instantiate_detectors(qubits=ordered_ro_list, + # acq_ch_map=acq_ch_map) + # # Prepare readout pulses with custom channel map + # RO_lutman_1 = station.components['RO_lutman_1'] + # RO_lutman_2 = station.components['RO_lutman_2'] + # RO_lutman_3 = station.components['RO_lutman_3'] + # RO_lutman_4 = station.components['RO_lutman_4'] + # if [11] not in RO_lutman_1.resonator_combinations(): + # RO_lutman_1.resonator_combinations([[11], + # RO_lutman_1.resonator_combinations()[0]]) + # RO_lutman_1.load_waveforms_onto_AWG_lookuptable() + # if [3, 7] not in 
RO_lutman_2.resonator_combinations(): + # RO_lutman_2.resonator_combinations([[3, 7], + # RO_lutman_2.resonator_combinations()[0]]) + # RO_lutman_2.load_waveforms_onto_AWG_lookuptable() + # if [8, 12] not in RO_lutman_4.resonator_combinations(): + # RO_lutman_4.resonator_combinations([[8, 12], + # RO_lutman_4.resonator_combinations()[0]]) + # RO_lutman_4.load_waveforms_onto_AWG_lookuptable() + # if [14, 10] not in RO_lutman_3.resonator_combinations(): + # RO_lutman_3.resonator_combinations([[14, 10], + # RO_lutman_3.resonator_combinations()[0]]) + # RO_lutman_3.load_waveforms_onto_AWG_lookuptable() + # t_ro = time.time()-t_ro + # prepare flux pulses + t_fl = time.time() + # Set Flux pulse amplitudes + for q in All_qubits: + fl_lm_q = station.components[q].instr_LutMan_Flux.get_instr() + set_combined_waveform_amplitudes(fl_lm_q) + # upload flux pulses of data qubits + for stabilizer_qubit in Ancilla_qubits: + Neighbors_dict = get_nearest_neighbors(stabilizer_qubit) + for q, dirct in Neighbors_dict.items(): + fl_lm_q = station.components[q].instr_LutMan_Flux.get_instr() + waveforms_to_upload = ['park', f'cz_{dirct}'] + load_single_waveform_on_HDAWG(fl_lm_q, waveforms_to_upload) + # upload flux pulses of ancilla qubit + Q_A = station.components[stabilizer_qubit] + fl_lm_q = Q_A.instr_LutMan_Flux.get_instr() + waveforms_to_upload = [] + for dirct in Neighbors_dict.values(): + if dirct == 'NW': + waveforms_to_upload.append('cz_SE') + elif dirct == 'NE': + waveforms_to_upload.append('cz_SW') + elif dirct == 'SW': + waveforms_to_upload.append('cz_NE') + else: # SE + waveforms_to_upload.append('cz_NW') + load_single_waveform_on_HDAWG(fl_lm_q, waveforms_to_upload) + t_fl = time.time()-t_fl + # prepare timings + t_tim = time.time() + device.prepare_timing() + t_tim = time.time()-t_tim + # Upload mw pulses + t_mw = time.time() + for q in All_qubits: + Q = station.components[q] + Q._prep_td_sources() + mw_lm = Q.instr_LutMan_MW.get_instr() + 
mw_lm.upload_single_qubit_phase_corrections() + if check_prepare_mw(q, station, lutmap='default_lutmap'): + mw_lm = Q.instr_LutMan_MW.get_instr() + mw_lm.set_default_lutmap() + Q._prep_mw_pulses() + t_mw = time.time()-t_mw + + # Prepare LRUs if desired + if prepare_LRU_pulses: + LRU_duration = measurement_time_ns-20 + Z_LRU_duration = measurement_time_ns//3-20 + # Set LRU LO triggering + MW_LO_6 = station.components['MW_LO_6'] + MW_LO_6.pulsemod_state(True) + MW_LO_6.pulsemod_source('INT') + MW_LO_6.visa_handle.write('pulm:delay 160 ns') + MW_LO_6.visa_handle.write(f'pulm:width {measurement_time_ns+Z_LRU_duration} ns') + MW_LO_6.visa_handle.write('pulm:trig:mode EXT') + MW_LO_6.visa_handle.write('conn:trig:omod PETR') + MW_LO_7 = station.components['MW_LO_7'] + MW_LO_7.pulsemod_state(True) + MW_LO_7.pulsemod_source('INT') + MW_LO_7.visa_handle.write('pulm:delay 390 ns') + MW_LO_7.visa_handle.write('pulm:width 140 ns') + MW_LO_7.visa_handle.write('pulm:trig:mode EXT') + MW_LO_7.visa_handle.write('conn:trig:omod PETR') + # for D4, D5, D6 + station.components['D4'].LRU_duration(LRU_duration*1e-9+Z_LRU_duration*1e-9) + station.components['D5'].LRU_duration(LRU_duration*1e-9+Z_LRU_duration*1e-9) + station.components['D6'].LRU_duration(LRU_duration*1e-9+Z_LRU_duration*1e-9) + station.components['D6'].LRU_duration(Z_LRU_duration*1e-9) + station.components['Z1'].LRU_duration(Z_LRU_duration*1e-9) + station.components['Z2'].LRU_duration(Z_LRU_duration*1e-9) + station.components['Z3'].LRU_duration(Z_LRU_duration*1e-9) + station.components['Z4'].LRU_duration(Z_LRU_duration*1e-9) + # upload pulses + station.components['D4']._prep_LRU_pulses() + station.components['D5']._prep_LRU_pulses() + station.components['D6']._prep_LRU_pulses() + station.components['Z1']._prep_LRU_pulses() + station.components['Z2']._prep_LRU_pulses() + station.components['Z2']._prep_LRU_pulses() + station.components['Z4']._prep_LRU_pulses() + + # print(f'Preparation time RO:\t{t_ro}') + 
print(f'Preparation time FL:\t{t_fl}') + print(f'Preparation time TIM:\t{t_tim}') + print(f'Preparation time MW:\t{t_mw}') + # Run experiment + device.measure_defect_rate( + ancilla_qubit='Z3', + data_qubits=['D4', 'D7'], + experiments=['surface_13', 'surface_13_LRU',], + Rounds= [15], + repetitions = 2, + lru_qubits = ['D4', 'D5', 'D6', 'Z1', 'Z2', 'Z3', 'Z4'], + prepare_for_timedomain = False, + prepare_readout = True, + heralded_init = True, + stabilizer_type = 'X', + initial_state_qubits = None, + measurement_time_ns = measurement_time_ns, + analyze = False) + # Run Pij matrix analysis + a = ma2.pba.Repeated_stabilizer_measurements( + ancilla_qubit = Ancilla_qubits, + data_qubits = Data_qubits, + Rounds = [15], + heralded_init = True, + number_of_kernels = 2, + Pij_matrix = True, + experiments = ['surface_13', 'surface_13_LRU'], + extract_only = False) + # # Surface_13 experiment + if log_zero: + for state in [ + [], # I + ['D1','D2'], # X1 + ['D8','D9'], # X4 + ['D2','D3','D5','D6'], # X2 + ['D4','D5','D7','D8'], # X3 + ['D1','D3','D5','D6'], # X1 X2 + ['D4','D5','D7','D9'], # X3 X4 + ['D1','D2','D8','D9'], # X1 X4 + ['D2','D3','D4','D6','D7','D8'], # X2 X3 + ['D1','D2','D4','D5','D7','D8'], # X1 X3 + ['D2','D3','D5','D6','D8','D9'], # X2 X4 + ['D1','D3','D4','D6','D7','D8'], # X1 X2 X3 + ['D2','D3','D4','D6','D7','D9'], # X2 X3 X4 + ['D1','D3','D5','D6','D8','D9'], # X1 X2 X4 + ['D1','D2','D4','D5','D7','D9'], # X1 X3 X4 + ['D1','D3','D4','D6','D7','D9'] # X1 X2 X3 X4 + ]: + device.measure_defect_rate( + ancilla_qubit='Z3', + data_qubits=['D4', 'D7'], + experiments=['surface_13', 'surface_13_LRU'], + Rounds=[1, 2, 4, 8, 16], + lru_qubits = ['D4', 'D5', 'D6', 'Z1', 'Z2', 'Z3', 'Z4'], + repetitions = 20, + prepare_for_timedomain = False, + prepare_readout = False, + heralded_init = True, + stabilizer_type = 'Z', + initial_state_qubits = state, + measurement_time_ns = measurement_time_ns, + analyze = False) + # Run Pij matrix analysis + a = 
###########################################
# Helper functions for theory predictions #
###########################################
def transmon_hamiltonian(n, Ec, Ej, phi=0, ng=0):
    '''
    Charge-basis Hamiltonian of a transmon.

    n: charge-state cutoff; the matrix is (2n+1) x (2n+1).
    Ec, Ej: charging and Josephson energies.
    phi: external flux (in units of the flux quantum) tuning the
         effective Josephson energy.
    ng: offset charge.
    '''
    Ej_f = Ej * np.abs(np.cos(np.pi * phi))
    charge = np.arange(-n - ng, n + 1 - ng)
    kinetic = np.diag(charge**2, k=0)
    tunneling = np.diag(np.ones(2*n), k=1) + np.diag(np.ones(2*n), k=-1)
    return 4 * Ec * kinetic - Ej_f / 2 * tunneling

def solve_hamiltonian(EC, EJ, phi=0, ng=0, n_level=1):
    '''
    Diagonalize the transmon Hamiltonian and return the energies of
    levels n_level and n_level+1 relative to the ground state.
    '''
    n = 10  # charge-basis cutoff used throughout this module
    H = transmon_hamiltonian(n, EC, EJ, phi=phi, ng=ng)
    eigvals, _ = np.linalg.eigh(H)
    eigvals -= eigvals[0]
    return eigvals[n_level], eigvals[n_level + 1]

from scipy.optimize import minimize
def find_transmon_params(f0, a0):
    '''
    Find (Ec, Ej) reproducing qubit frequency f0 and anharmonicity a0
    by least-squares minimization.
    '''
    def cost_func(param):
        # Optimizer works on O(1e2) numbers; rescale to Hz here.
        EC, EJ = param
        EC *= 1e6
        EJ *= 1e9
        H = transmon_hamiltonian(10, EC, EJ, phi=0)
        eigvals, _ = np.linalg.eigh(H)
        eigvals -= eigvals[0]
        freq = eigvals[1]
        anha = eigvals[2] - 2 * eigvals[1]
        return (freq - f0)**2 + (anha - a0)**2
    # Run minimizer and record values
    Ec, Ej = minimize(cost_func, x0=[300, 15], options={'disp': True}).x
    return Ec * 1e6, Ej * 1e9

def calculate_avoided_crossing_detuning(f_H, f_L, a_H, a_L):
    '''
    Estimate the detunings of the 11-02 and 11-20 avoided crossings
    between a high-frequency (f_H, a_H) and a low-frequency (f_L, a_L)
    qubit, using quadratic fits of the flux-dependent levels.
    '''
    Ec_H, Ej_H = find_transmon_params(f_H, a_H)
    Phi = np.linspace(0, .4, 21)
    E02 = np.ones(21)
    E11 = np.ones(21)
    for i, p in enumerate(Phi):
        E1, E2 = solve_hamiltonian(Ec_H, Ej_H, phi=p, ng=0, n_level=1)
        E02[i] = E2
        E11[i] = E1 + f_L
    p_02 = np.poly1d(np.polyfit(Phi, E02, deg=2))
    p_11 = np.poly1d(np.polyfit(Phi, E11, deg=2))
    # detuning of 11-02
    phi_int_1 = np.max((p_02 - p_11).roots)
    detuning_1 = p_11(0) - p_11(phi_int_1)
    # detuning of 11-20
    f_20 = 2 * f_L + a_L
    phi_int_2 = np.max((p_11 - f_20).roots)
    detuning_2 = p_11(0) - p_11(phi_int_2)
    return detuning_1, detuning_2
############################################
# Helper functions for waveform parameters #
############################################
def get_parking_frequency(qubit_name: str) -> float:
    """:return: Qubit frequency when parking [Hz]."""
    qubit: Transmon = Device.find_instrument(qubit_name)
    flux_lutman: FluxLutMan = qubit.instr_LutMan_Flux.get_instr()
    # Detuning produced by the park waveform at its current amplitude.
    park_detuning: float = get_frequency_waveform(
        wave_par='park_amp',
        flux_lutman=flux_lutman,
    )
    qubit_frequency: float = qubit.freq_qubit()
    return qubit_frequency - park_detuning

def set_parking_frequency(qubit_name: str, park_frequency: float) -> float:
    """Set the park amplitude so the qubit parks at `park_frequency`.

    :return: Qubit frequency when parking [Hz]."""
    qubit: Transmon = Device.find_instrument(qubit_name)
    flux_lutman: FluxLutMan = qubit.instr_LutMan_Flux.get_instr()
    qubit_frequency: float = qubit.freq_qubit()
    park_detuning: float = qubit_frequency - park_frequency
    park_amplitude: float = get_DAC_amp_frequency(
        freq=park_detuning,
        flux_lutman=flux_lutman,
        negative_amp=False,
    )
    # Record the old settings before updating, for the log message.
    old_park_frequency: float = get_parking_frequency(qubit_name)
    old_park_amplitude: float = flux_lutman.park_amp()
    # Update parking amplitude
    flux_lutman.park_amp(park_amplitude)
    logging.info(f"Parking amplitude of {qubit_name} is updated from {old_park_amplitude} ({(old_park_frequency * 1e-9):0.2f} GHz) to {park_amplitude} ({(park_frequency * 1e-9):0.2f} GHz)")
    return get_parking_frequency(qubit_name)

def set_parking_detuning(qubit_name: str, park_detuning: float) -> float:
    """Park the qubit a fixed detuning below its idling frequency.

    :return: Qubit frequency when parking [Hz]."""
    qubit: Transmon = Device.find_instrument(qubit_name)
    qubit_frequency: float = qubit.freq_qubit()
    park_frequency: float = qubit_frequency - park_detuning
    return set_parking_frequency(
        qubit_name=qubit_name,
        park_frequency=park_frequency,
    )
def get_frequency_waveform(wave_par, flux_lutman):
    '''
    Calculate the frequency detuning produced by a flux waveform.

    wave_par: name of the DAC-amplitude parameter on the lutman
              (e.g. 'park_amp').
    flux_lutman: flux lutman holding the waveform and channel settings.
    Returns the detuning [Hz] predicted by the lutman's polynomial model.
    '''
    poly_coefs = flux_lutman.q_polycoeffs_freq_01_det()
    out_range = flux_lutman.cfg_awg_channel_range()
    ch_amp = flux_lutman.cfg_awg_channel_amplitude()
    dac_amp = flux_lutman.get(wave_par)
    # AWG output voltage for this DAC amplitude and channel gain.
    out_volt = dac_amp * ch_amp * out_range / 2
    poly_func = np.poly1d(poly_coefs)
    freq = poly_func(out_volt)
    return freq

def get_DAC_amp_frequency(freq, flux_lutman, negative_amp: bool = False):
    '''
    Calculate the DAC amplitude corresponding to a frequency detuning.

    freq: target detuning [Hz].
    negative_amp: pick the negative-voltage root of the polynomial.
    If the required amplitude exceeds the DAC range (|amp| > 1), the
    channel gain is increased by 50% and the calculation is retried
    recursively.
    '''
    poly_coefs = flux_lutman.q_polycoeffs_freq_01_det()
    out_range = flux_lutman.cfg_awg_channel_range()
    ch_amp = flux_lutman.cfg_awg_channel_amplitude()
    poly_func = np.poly1d(poly_coefs)
    if negative_amp:
        out_volt = min((poly_func - freq).roots)
    else:
        out_volt = max((poly_func - freq).roots)
    sq_amp = out_volt / (ch_amp * out_range / 2)
    # Safe check in case amplitude exceeds maximum
    if abs(sq_amp) > 1:
        new_gain = ch_amp * 1.5
        # BUG FIX: the message used to print the OLD gain value.
        print(f'WARNING had to increase gain of {flux_lutman.name} to {new_gain}!')
        flux_lutman.cfg_awg_channel_amplitude(new_gain)
        # BUG FIX: the recursive call used to drop `negative_amp`,
        # flipping the sign of the result for negative waveforms.
        sq_amp = get_DAC_amp_frequency(freq, flux_lutman,
                                       negative_amp=negative_amp)
    return sq_amp

def get_Ch_amp_frequency(freq, flux_lutman, DAC_param='sq_amp'):
    '''
    Calculate the channel gain corresponding to a frequency detuning,
    keeping the DAC amplitude stored in `DAC_param` fixed.
    '''
    poly_coefs = flux_lutman.q_polycoeffs_freq_01_det()
    out_range = flux_lutman.cfg_awg_channel_range()
    dac_amp = flux_lutman.get(DAC_param)
    poly_func = np.poly1d(poly_coefs)
    if dac_amp < 0:  # if negative amplitude
        out_volt = min((poly_func - freq).roots)
    else:  # if positive amplitude
        out_volt = max((poly_func - freq).roots)
    ch_amp = out_volt / (dac_amp * out_range / 2)
    if isinstance(ch_amp, complex):
        print('Warning: Complex amplitude estimated, setting it to zero.')
        ch_amp = 0
    return ch_amp
def load_single_waveform_on_HDAWG(lutman, wave_id):
    """
    Load a single waveform (or list of waveforms) onto the HDAWG driven
    by `lutman`, re-uploading only those that differ from the cache.
    """
    AWG = lutman.AWG.get_instr()
    AWG.stop()
    # Allow wave_id to be a list of waveforms
    if isinstance(wave_id, str):
        wave_id = [wave_id]
    for wf in wave_id:
        # Upload only when the regenerated waveform differs from the cache.
        if check_flux_wf_upload(lutman, wf):
            print(f'Uploading {wf} in {lutman.name}.')
            lutman.load_waveform_onto_AWG_lookuptable(
                wave_id=wf, regenerate_waveforms=True)
    # Gain/range getters called here for effect — purpose not clear from
    # this file; NOTE(review): placement relative to the loop was
    # reconstructed from a mangled source, confirm.
    lutman.cfg_awg_channel_amplitude()
    lutman.cfg_awg_channel_range()
    AWG.start()
def set_combined_waveform_amplitudes(flux_lutman):
    '''
    Set the waveform DAC amplitudes for all gate directions and parking
    of a flux lutman, sharing a single channel gain.

    The waveform requiring the largest detuning gets a fixed DAC
    amplitude (+-0.3) and the channel gain is chosen to realize its
    detuning; all other waveforms are scaled within that gain.
    '''
    # print(f'Setting common amps in {flux_lutman.name}.')
    qubit = flux_lutman.name.split('_')[-1]
    # calculate park detuning
    park_det = get_frequency_waveform('park_amp', flux_lutman)
    # Remove default values (need to fix this in pycqed instead)
    for drct in ['NW', 'NE', 'SW', 'SE']:
        if flux_lutman.get(f'q_freq_10_{drct}') == 6e9:
            flux_lutman.set(f'q_freq_10_{drct}', 0)
    # Required detunings dictionary
    Detunings = {
        'park_amp': park_det,
        'vcz_amp_dac_at_11_02_NW': flux_lutman.q_freq_10_NW(),
        'vcz_amp_dac_at_11_02_NE': flux_lutman.q_freq_10_NE(),
        'vcz_amp_dac_at_11_02_SW': flux_lutman.q_freq_10_SW(),
        'vcz_amp_dac_at_11_02_SE': flux_lutman.q_freq_10_SE(),
    }
    # The waveform with the maximum detuning defines the channel gain.
    max_wf = max(Detunings, key=Detunings.get)
    # Pin its DAC amplitude to 0.3 (negative for offset qubits) and set
    # the channel gain so it reaches its detuning.
    flux_lutman.set(max_wf, -0.3 if qubit in OFFSET_QUBITS else 0.3)
    max_wf_gain = get_Ch_amp_frequency(Detunings[max_wf], flux_lutman,
                                       DAC_param=max_wf)
    flux_lutman.cfg_awg_channel_amplitude(max_wf_gain)
    Detunings.pop(max_wf)  # remove waveform from detuning dict
    # Set remaining waveform amplitudes
    for wf, det in Detunings.items():
        if det > 20e6:
            wf_amp = get_DAC_amp_frequency(
                det, flux_lutman,
                negative_amp=qubit in OFFSET_QUBITS)
        else:
            wf_amp = 0
        flux_lutman.set(wf, wf_amp)
def prepare_for_parity_check(stabilizer_qubit, station,
                             Data_qubits: list = None):
    '''
    Prepare for a time-domain parity-check experiment on one stabilizer:
    configure and upload the readout, flux and microwave settings for
    the ancilla, its data qubits and the spectator qubits to be parked.

    stabilizer_qubit: name of the ancilla qubit.
    Data_qubits: data qubits of the check; defaults to the ancilla's
                 nearest neighbors.
    returns True
    '''
    # Set configuration
    TQG_duration_ns = TWOQ_GATE_DURATION_NS - 20
    file_cfg = gc.generate_config(in_filename=input_file,
                                  out_filename=config_fn,
                                  mw_pulse_duration=20,
                                  ro_duration=500,
                                  flux_pulse_duration=TQG_duration_ns,
                                  init_duration=200000)
    device = station.components['device']
    device.ro_acq_weight_type('optimal')
    # Get qubits directly involved in parity check
    if not Data_qubits:
        Data_qubits = list(get_nearest_neighbors(stabilizer_qubit).keys())
    PC_qubits = [stabilizer_qubit] + Data_qubits
    # Get spectator qubits of parity check
    Spec_qubits = []
    for q in Data_qubits:
        _qubits = list(get_nearest_neighbors(q).keys())
        _qubits.remove(stabilizer_qubit)
        Spec_qubits = Spec_qubits + _qubits
    Spec_qubits = np.unique(Spec_qubits)
    # Set Flux pulse amplitudes
    for q in list(PC_qubits) + list(Spec_qubits):
        fl_lm_q = station.components[q].instr_LutMan_Flux.get_instr()
        set_combined_waveform_amplitudes(fl_lm_q)
    print(f'Parity check qubits: {" ".join(PC_qubits)}')
    print(f'Spectator qubits: {" ".join(Spec_qubits)}')
    # Prepare parity check qubits
    # device.prepare_for_timedomain(qubits=PC_qubits)
    import time
    # prepare readout
    t_ro = time.time()
    if check_prepare_readout(qubits=PC_qubits, station=station):
        device.prepare_readout(qubits=PC_qubits)
    else:
        # if preparation is not deemed necessary try just updating detectors
        try:
            acq_ch_map = device._acq_ch_map
            device._prep_ro_instantiate_detectors(qubits=PC_qubits,
                                                  acq_ch_map=acq_ch_map)
        # BUG FIX: was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; fall back to a full preparation
        # only on ordinary errors.
        except Exception:
            device.prepare_readout(qubits=PC_qubits)
    t_ro = time.time() - t_ro
    # prepare flux pulses
    t_fl = time.time()
    # upload flux pulses of data qubits
    Neighbors_dict = get_nearest_neighbors(stabilizer_qubit)
    for q, dirct in Neighbors_dict.items():
        fl_lm_q = station.components[q].instr_LutMan_Flux.get_instr()
        load_single_waveform_on_HDAWG(fl_lm_q, ['park', f'cz_{dirct}'])
    # upload flux pulses of ancilla qubit (mirrored cz directions)
    Q_A = station.components[stabilizer_qubit]
    fl_lm_q = Q_A.instr_LutMan_Flux.get_instr()
    waveforms_to_upload = []
    for dirct in Neighbors_dict.values():
        if dirct == 'NW':
            waveforms_to_upload.append('cz_SE')
        elif dirct == 'NE':
            waveforms_to_upload.append('cz_SW')
        elif dirct == 'SW':
            waveforms_to_upload.append('cz_NE')
        else:  # SE
            waveforms_to_upload.append('cz_NW')
    load_single_waveform_on_HDAWG(fl_lm_q, waveforms_to_upload)
    # Prepare parking of spectator qubits
    for q in Spec_qubits:
        fl_lm_q = station.components[q].instr_LutMan_Flux.get_instr()
        load_single_waveform_on_HDAWG(fl_lm_q, 'park')
    t_fl = time.time() - t_fl
    # prepare timings
    t_tim = time.time()
    device.prepare_timing()
    t_tim = time.time() - t_tim
    # Upload mw pulses
    t_mw = time.time()
    for q in PC_qubits:
        Q = station.components[q]
        Q._prep_td_sources()
        if check_prepare_mw(q, station):
            # Lutmap and phase pulses are loaded manually below, so the
            # automatic AWG upload inside _prep_mw_pulses is skipped.
            Q.cfg_prepare_mw_awg(False)
            Q._prep_mw_pulses()
            Q.cfg_prepare_mw_awg(True)
            mw_lm = Q.instr_LutMan_MW.get_instr()
            mw_lm.set_default_lutmap()
            mw_lm.load_phase_pulses_to_AWG_lookuptable()
            mw_lm.upload_single_qubit_phase_corrections()
    t_mw = time.time() - t_mw
    print(f'Preparation time RO:\t{t_ro}')
    print(f'Preparation time FL:\t{t_fl}')
    print(f'Preparation time TIM:\t{t_tim}')
    print(f'Preparation time MW:\t{t_mw}')
    return True
def check_prepare_readout(qubits, station):
    '''
    Assess whether readout pulses have to be re-uploaded, by checking
    that every RO lutman involved already contains the resonator
    combination required by `qubits`.
    Returns True if preparation is necessary (otherwise returns False).
    '''
    # Collect, per RO lutman, the resonator numbers required by `qubits`.
    ro_lms = []
    resonators_in_lm = {}
    for qb_name in qubits:
        qb = station.components[qb_name]
        # qubit and resonator number are identical
        res_nr = qb.cfg_qubit_nr()
        ro_lm = qb.instr_LutMan_RO.get_instr()
        if ro_lm not in ro_lms:
            ro_lms.append(ro_lm)
            resonators_in_lm[ro_lm.name] = []
        resonators_in_lm[ro_lm.name].append(res_nr)
    # Preparation is needed unless every lutman already holds its
    # required resonator combination.
    check_list = [resonators_in_lm[lm.name] in lm.resonator_combinations()
                  for lm in ro_lms]
    return not all(check_list)
def check_prepare_mw(qubit, station,
                     lutmap='phase_lutmap'):
    '''
    Assess whether the microwave pulses of `qubit` must be re-uploaded,
    by comparing the uploaded lutmap and every uploaded HDAWG waveform
    against freshly generated ones.
    Returns True if preparation is necessary (otherwise returns False).
    '''
    # Required lutmap for parity check experiments
    if lutmap == 'phase_lutmap':
        required_lutmap = {
            0: {'name': 'I', 'theta': 0, 'phi': 0, 'type': 'ge'},
            1: {'name': 'rX180', 'theta': 180, 'phi': 0, 'type': 'ge'},
            2: {'name': 'rY180', 'theta': 180, 'phi': 90, 'type': 'ge'},
            3: {'name': 'rX90', 'theta': 90, 'phi': 0, 'type': 'ge'},
            4: {'name': 'rY90', 'theta': 90, 'phi': 90, 'type': 'ge'},
            5: {'name': 'rXm90', 'theta': -90, 'phi': 0, 'type': 'ge'},
            6: {'name': 'rYm90', 'theta': -90, 'phi': 90, 'type': 'ge'},
            7: {'name': 'rPhi90', 'theta': 90, 'phi': 0, 'type': 'ge'},
            8: {'name': 'spec', 'type': 'spec'},
            9: {'name': 'rPhi90', 'theta': 90, 'phi': 0, 'type': 'ge'},
            10: {'name': 'rPhi90', 'theta': 90, 'phi': 20, 'type': 'ge'},
            11: {'name': 'rPhi90', 'theta': 90, 'phi': 40, 'type': 'ge'},
            12: {'name': 'rPhi90', 'theta': 90, 'phi': 60, 'type': 'ge'},
            13: {'name': 'rPhi90', 'theta': 90, 'phi': 80, 'type': 'ge'},
            14: {'name': 'rPhi90', 'theta': 90, 'phi': 100, 'type': 'ge'},
            15: {'name': 'rPhi90', 'theta': 90, 'phi': 120, 'type': 'ge'},
            16: {'name': 'rPhi90', 'theta': 90, 'phi': 140, 'type': 'ge'},
            27: {'name': 'rXm180', 'phi': 0, 'theta': -180, 'type': 'ge'},
            30: {'name': 'rX23', 'theta': 180, 'phi': 0, 'type': 'fh'},
            51: {'name': 'phaseCorrLRU', 'type': 'phase'},
            52: {'name': 'phaseCorrStep1', 'type': 'phase'},
            53: {'name': 'phaseCorrStep2', 'type': 'phase'},
            54: {'name': 'phaseCorrStep3', 'type': 'phase'},
            55: {'name': 'phaseCorrStep4', 'type': 'phase'},
            56: {'name': 'phaseCorrStep5', 'type': 'phase'},
            57: {'name': 'phaseCorrStep6', 'type': 'phase'},
            58: {'name': 'phaseCorrStep7', 'type': 'phase'},
            59: {'name': 'phaseCorrStep8', 'type': 'phase'},
            60: {'name': 'phaseCorrNW', 'type': 'phase'},
            61: {'name': 'phaseCorrNE', 'type': 'phase'},
            62: {'name': 'phaseCorrSW', 'type': 'phase'},
            63: {'name': 'phaseCorrSE', 'type': 'phase'},
            17: {'name': 'rPhi90', 'theta': 90, 'phi': 160, 'type': 'ge'},
            18: {'name': 'rPhi90', 'theta': 90, 'phi': 180, 'type': 'ge'},
            19: {'name': 'rPhi90', 'theta': 90, 'phi': 200, 'type': 'ge'},
            20: {'name': 'rPhi90', 'theta': 90, 'phi': 220, 'type': 'ge'},
            21: {'name': 'rPhi90', 'theta': 90, 'phi': 240, 'type': 'ge'},
            22: {'name': 'rPhi90', 'theta': 90, 'phi': 260, 'type': 'ge'},
            23: {'name': 'rPhi90', 'theta': 90, 'phi': 280, 'type': 'ge'},
            24: {'name': 'rPhi90', 'theta': 90, 'phi': 300, 'type': 'ge'},
            25: {'name': 'rPhi90', 'theta': 90, 'phi': 320, 'type': 'ge'},
            26: {'name': 'rPhi90', 'theta': 90, 'phi': 340, 'type': 'ge'}}
    elif lutmap == 'default_lutmap':
        required_lutmap = {
            0: {'name': 'I', 'theta': 0, 'phi': 0, 'type': 'ge'},
            1: {'name': 'rX180', 'theta': 180, 'phi': 0, 'type': 'ge'},
            2: {'name': 'rY180', 'theta': 180, 'phi': 90, 'type': 'ge'},
            3: {'name': 'rX90', 'theta': 90, 'phi': 0, 'type': 'ge'},
            4: {'name': 'rY90', 'theta': 90, 'phi': 90, 'type': 'ge'},
            5: {'name': 'rXm90', 'theta': -90, 'phi': 0, 'type': 'ge'},
            6: {'name': 'rYm90', 'theta': -90, 'phi': 90, 'type': 'ge'},
            7: {'name': 'rPhi90', 'theta': 90, 'phi': 0, 'type': 'ge'},
            8: {'name': 'spec', 'type': 'spec'},
            9: {'name': 'rX12', 'theta': 180, 'phi': 0, 'type': 'ef'},
            10: {'name': 'square', 'type': 'square'},
            11: {'name': 'rY45', 'theta': 45, 'phi': 90, 'type': 'ge'},
            12: {'name': 'rYm45', 'theta': -45, 'phi': 90, 'type': 'ge'},
            13: {'name': 'rX45', 'theta': 45, 'phi': 0, 'type': 'ge'},
            14: {'name': 'rXm45', 'theta': -45, 'phi': 0, 'type': 'ge'},
            15: {'name': 'rX12_90', 'theta': 90, 'phi': 0, 'type': 'ef'},
            16: {'name': 'rX23_90', 'theta': 90, 'phi': 0, 'type': 'fh'},
            27: {'name': 'rXm180', 'phi': 0, 'theta': -180, 'type': 'ge'},
            30: {'name': 'rX23', 'theta': 180, 'phi': 0, 'type': 'fh'},
            51: {'name': 'phaseCorrLRU', 'type': 'phase'},
            52: {'name': 'phaseCorrStep1', 'type': 'phase'},
            53: {'name': 'phaseCorrStep2', 'type': 'phase'},
            54: {'name': 'phaseCorrStep3', 'type': 'phase'},
            55: {'name': 'phaseCorrStep4', 'type': 'phase'},
            56: {'name': 'phaseCorrStep5', 'type': 'phase'},
            57: {'name': 'phaseCorrStep6', 'type': 'phase'},
            58: {'name': 'phaseCorrStep7', 'type': 'phase'},
            59: {'name': 'phaseCorrStep8', 'type': 'phase'},
            60: {'name': 'phaseCorrNW', 'type': 'phase'},
            61: {'name': 'phaseCorrNE', 'type': 'phase'},
            62: {'name': 'phaseCorrSW', 'type': 'phase'},
            63: {'name': 'phaseCorrSE', 'type': 'phase'}}
    else:
        raise ValueError("Accepted lutmaps are 'phase_lutmap' and 'default_lutmap'.")
    # Assess uploaded lutmap
    Q = station.components[qubit]
    mw_lm = Q.instr_LutMan_MW.get_instr()
    uploaded_lutmap = mw_lm.LutMap()
    if required_lutmap != uploaded_lutmap:
        # Lutmap differs: must re-upload regardless of waveform contents.
        return True
    # Lutmaps match: compare every freshly generated waveform against
    # the samples currently uploaded on the AWG channels.
    mw_lm.generate_standard_waveforms()
    AWG = mw_lm.AWG.get_instr()
    wf_check_list = {}
    for wf_idx, wf in mw_lm._wave_dict.items():
        wf_name_I = 'wave_ch{}_cw{:03}'.format(mw_lm.channel_I(), wf_idx)
        wf_name_Q = 'wave_ch{}_cw{:03}'.format(mw_lm.channel_Q(), wf_idx)
        uploaded_wf_I = AWG.get(wf_name_I)
        uploaded_wf_Q = AWG.get(wf_name_Q)
        wf_check_list[wf_idx] = (all(wf[0] == uploaded_wf_I)
                                 and all(wf[1] == uploaded_wf_Q))
    return not all(wf_check_list.values())
def check_flux_wf_upload(flux_lutman, wave_id):
    '''
    Assess whether a flux waveform needs re-uploading, by comparing the
    waveform currently cached in the lutman's _wave_dict against a
    freshly generated one.
    '''
    # Copy the cached waveform (`+ 0` forces a new array) before
    # generating the candidate.
    present_wf = flux_lutman._wave_dict[wave_id] + 0
    # Generate the candidate waveform for this wave_id.
    if "i" == wave_id:
        new_wf = flux_lutman._gen_i()
    elif "square" == wave_id:
        new_wf = flux_lutman._gen_square()
    elif "park" == wave_id:
        new_wf = flux_lutman._gen_park()
    elif "cz" in wave_id:
        which_gate = wave_id.split('_')[-1]
        new_wf = flux_lutman._gen_cz(which_gate=which_gate)
    if len(new_wf) == len(present_wf):
        # Same length: upload only if any sample differs.
        return not all(new_wf == present_wf)
    else:
        # NOTE(review): a length change reports "no upload needed";
        # duration changes appear to be handled by
        # check_flux_wf_duration instead — confirm this is intended.
        return False

def check_flux_wf_duration(flux_lutman):
    '''
    Check whether the waveform duration of the lutman has changed.
    If so, reset the HDAWG waveforms so the change takes effect.
    '''
    # If current duration is shorter, update duration
    if flux_lutman.cfg_max_wf_length() < TWOQ_GATE_DURATION:
        flux_lutman.cfg_max_wf_length(TWOQ_GATE_DURATION)
    # If duration is higher, update and reset waveforms
    # (this is necessary for changes to take effect).
    elif flux_lutman.cfg_max_wf_length() > TWOQ_GATE_DURATION:
        flux_lutman.cfg_max_wf_length(TWOQ_GATE_DURATION)
        awg = flux_lutman.AWG.get_instr()
        awg.reset_waveforms_zeros()
        print(f'Loading waveforms to match {TWOQ_GATE_DURATION_NS:.0f} '
              'ns gate duration')
        flux_lutman.load_waveforms_onto_AWG_lookuptable()
def make_unipolar_pulse_net_zero(flux_lutman, wave_id):
    '''
    Add an appropriate padding amplitude to a cz pulse so that the total
    waveform area is net zero.
    '''
    assert 'cz' in wave_id, 'Only meant for cz waveforms'
    dirct = wave_id.split('_')[-1]
    # Regenerate the waveform with zero padding amplitude first.
    flux_lutman.set(f'vcz_amp_pad_{dirct}', 0)
    flux_lutman.generate_standard_waveforms()
    wf = flux_lutman._wave_dict[wave_id]
    n_samples = flux_lutman.get(f'vcz_amp_pad_samples_{dirct}')
    # Padding amplitude that cancels the pulse area (2.4 GSa/s sampling).
    net_area = np.trapz(wf) * 1/2.4e9
    time_pad = (flux_lutman.get(f'vcz_time_pad_{dirct}') - n_samples/2.4e9) * 2
    amp_pad = -(net_area) / time_pad
    # # Ensure amplitude is lower than avoided crossing amp
    # assert amp_pad < 0.5
    flux_lutman.set(f'vcz_amp_pad_{dirct}', amp_pad)

def align_CZ_gate_pulses(qH, qL, station):
    '''
    Align the CZ pulses of a gate qubit pair with the parking pulses of
    their spectator qubits.
    '''
    # Setup qubits and lutmans
    dircts = get_gate_directions(qH, qL)
    Parked_qubits = get_parking_qubits(qH, qL)
    Q_H = station.components[qH]
    Q_L = station.components[qL]
    flux_lm_H = Q_H.instr_LutMan_Flux.get_instr()
    flux_lm_L = Q_L.instr_LutMan_Flux.get_instr()
    Flux_lm_ps = [station.components[q].instr_LutMan_Flux.get_instr()
                  for q in Parked_qubits]
    # Get gate parameters
    tp = flux_lm_H.get(f'vcz_time_single_sq_{dircts[0]}') * 2
    n_tmid = int(flux_lm_H.get(f'vcz_time_middle_{dircts[0]}') * 2.4e9)
    # Align pulses
    tmid_swf = swf.flux_t_middle_sweep(
        fl_lm_tm=[flux_lm_H, flux_lm_L],
        fl_lm_park=Flux_lm_ps,
        which_gate=list(dircts),
        duration=TWOQ_GATE_DURATION,
        time_park=TWOQ_GATE_DURATION - (6/2.4e9),
        t_pulse=[tp])
    tmid_swf.set_parameter(n_tmid)

def plot_wave_dicts(qH: list,
                    qL: list,
                    station,
                    label=''):
    '''
    Plot the distorted flux waveforms of each (qH, qL) gate pair and
    the parking pulses of their spectators on one figure.
    '''
    plt.close('all')
    Q_Hs = [station.components[Q] for Q in qH]
    Q_Ls = [station.components[Q] for Q in qL]
    flux_lm_Hs = [Q_inst.instr_LutMan_Flux.get_instr() for Q_inst in Q_Hs]
    flux_lm_Ls = [Q_inst.instr_LutMan_Flux.get_instr() for Q_inst in Q_Ls]
    n_colors = 2*len(flux_lm_Hs) + 6
    cmap = plt.get_cmap("tab10", n_colors)

    fig, ax = plt.subplots(figsize=(9, 5), dpi=120)
    ax2 = ax.twiny()
    ax.set_title(f"Plot waveforms {qH}_{qL}", y=1.1, fontsize=14)
    for i, Q in enumerate(Q_Hs):
        dircts = get_gate_directions(Q.name, Q_Ls[i].name)
        ax.plot(flux_lm_Hs[i]._wave_dict_dist[f'cz_{dircts[0]}'],
                linestyle='-', linewidth=1.5, marker='.',
                markersize=5, color=cmap(i), label=f'{Q.name}-{dircts[0]}')
        ax.plot(flux_lm_Ls[i]._wave_dict_dist[f'cz_{dircts[1]}'],
                linestyle='--', linewidth=1.5,
                markersize=8, color=cmap(i+len(flux_lm_Hs)),
                label=f'{Q_Ls[i].name}_{dircts[1]}')
        for j, q in enumerate(get_parking_qubits(Q.name, Q_Ls[i].name)):
            if q not in qH+qL:
                ax.plot(station.components[q].instr_LutMan_Flux.get_instr()._wave_dict_dist[f'park'],
                        linestyle='-', linewidth=1, markersize=3, alpha=0.6,
                        color=cmap(j+i+1+len(flux_lm_Hs)), label=f'{q}_Park')

    ax.axhline(0.5, color='k', ls=':', alpha=0.8)
    ax.axhline(-0.5, color='k', ls=':', alpha=0.8)
    ax.axhline(0, color='k', ls=':', alpha=0.8)
    max_len = len(flux_lm_Hs[i]._wave_dict_dist[f'cz_{dircts[0]}'])
    ax.set_xticks(np.arange(0, max_len+1, 8))
    ax.set_xlabel("Duration (sampling points)", fontsize=12)
    ax.set_yticks(np.arange(-0.5, 0.51, 0.1))
    ax.set_ylabel("Amplitude (a.u.)", fontsize=12)
    # set ticks of top axis according to tick positions of bottom axis,
    # but with units of ns
    ax2.set_xlim(ax.get_xlim())
    ax2.set_xticks(np.arange(0, max_len+1, 8))
    ax2.set_xticklabels([f"{t:.1f}" for t in 1/2.4 * np.arange(0, max_len+1, 8)],
                        fontsize=8)
    ax2.set_xlabel("Duration (ns)", fontsize=12)

    ax.grid(True)
    ax.legend(loc='upper right', fontsize=12)

    plt.tight_layout()
    # plt.savefig(r"D:\Experiments\202208_Uran\Figures" + fr"\Flux_Pulses_{label}_{qH}_{qL}.png", format='png')
    plt.show()
    plt.close('all')
and two-qubit gates, + parity checks and two-qubit frequency trajectories. + ''' + MC = station.components['MC'] + if not label: + label = 'System_snapshot' + MC.set_measurement_name(label) + with h5d.Data( + name=MC.get_measurement_name(), datadir=MC.datadir() + ) as MC.data_object: + MC.get_measurement_begintime() + MC.save_instrument_settings(MC.data_object) + if Qubits == None: + Qubits = [ + 'D1', 'D2', 'D3', + 'D4', 'D5', 'D6', + 'D7', 'D8', 'D9', + 'Z1', 'Z2', 'Z3', 'Z4', + 'X1', 'X2', 'X3', 'X4', + ] + + if Qubit_pairs == None: + Qubit_pairs = [ + ['D4', 'Z1'], + ['D5', 'Z1'], + ['Z1', 'D1'], + ['Z1', 'D2'], + ['D6', 'Z2'], + ['Z2', 'D3'], + ['D4', 'Z3'], + ['Z3', 'D7'], + ['D5', 'Z4'], + ['D6', 'Z4'], + ['Z4', 'D8'], + ['Z4', 'D9'], + ['X1', 'D1'], + ['X1', 'D2'], + ['D6', 'X2'], + ['D5', 'X2'], + ['X2', 'D3'], + ['X2', 'D2'], + ['D5', 'X3'], + ['D4', 'X3'], + ['X3', 'D8'], + ['X3', 'D7'], + ['X4', 'D9'], + ['X4', 'D8'], + ] + # Plot single- and two-qubit gate benchmarks + if analyze: + ma2.gbta.SingleQubitGBT_analysis(Qubits=Qubits) + ma2.gbta.TwoQubitGBT_analysis(Qubit_pairs=Qubit_pairs) + # Plot two-qubit gate frequency trajectories + if Two_qubit_freq_trajectories: + ma2.tqg.TwoQubitGate_frequency_trajectory_analysis(Qubit_pairs=Qubit_pairs) + # Plot parity-check benchmarks + if parity_check: + ma2.gbta.ParityCheckGBT_analysis(Stabilizers=['Z1', 'Z2', 'Z3', 'Z4', 'X1', 'X2', 'X3', 'X4']) + return True + + +def DIO_calibration(station, force: bool = False): + ''' + Checks for DIO errors in all instruments and calibrates + them if error is found. 
+ ''' + # Get all intruments + no_error = True + awgs_with_errors = [] + cc = station.components['cc'] + UHFQC_1 = station.components['UHFQC_1'] + UHFQC_2 = station.components['UHFQC_2'] + UHFQC_3 = station.components['UHFQC_3'] + UHFQC_4 = station.components['UHFQC_4'] + AWG8_8481 = station.components['AWG8_8481'] + AWG8_8068 = station.components['AWG8_8068'] + AWG8_8074 = station.components['AWG8_8074'] + AWG8_8076 = station.components['AWG8_8076'] + AWG8_8499 = station.components['AWG8_8499'] + AWG8_8320 = station.components['AWG8_8320'] + AWG8_8279 = station.components['AWG8_8279'] + AWG8_8071 = station.components['AWG8_8071'] + device = station.components['device'] + + # Helper function + def _prep_awg(awg): + ''' + Helper function to prepare AWG. + This needs to be performed after DIO calibration. + ''' + for k in station.components.keys(): + if ('MW_lutman' in k) or ('flux_lm' in k): + lutman = station.components[k] + if lutman.AWG() == awg: + qubit_name = lutman.name.split('_')[-1] + # qubit = station.components[qubit_name] + # lutman.load_waveforms_onto_AWG_lookuptable() + device.prepare_for_timedomain(qubits=[qubit_name], + prepare_for_readout=False) + + ############################################ + # UHFQC DIO calibration + ############################################ + # UHFQC_1 + UHFQC_1.check_errors() + _errors = UHFQC_1._errors + # if 'AWGDIOTIMING' in _errors.keys(): + if _errors != {} or force: + if _errors != {}: + awgs_with_errors.append('UHFQC_1') + # no_error = False + UHFQC_1._errors = {} + print(f'Calibrating DIO on UHFQC_1.') + try: + DIO.calibrate(sender=cc, receiver=UHFQC_1, sender_dio_mode='uhfqa') + print(UHFQC_1.name, UHFQC_1._get_dio_calibration_delay(), 8) + except: + print(f'Failed DIO calibration on {UHFQC_1.name}!') + UHFQC_1._set_dio_calibration_delay(8) + UHFQC_1.clear_errors() + UHFQC_1.sigins_0_range(0.2) + UHFQC_1.sigins_1_range(0.2) + station.components['RO_lutman_1'].load_DIO_triggered_sequence_onto_UHFQC() + # UHFQC_2 + 
UHFQC_2.check_errors() + _errors = UHFQC_2._errors + # if 'AWGDIOTIMING' in _errors.keys(): + if _errors != {} or force: + # no_error = False + if _errors != {}: + awgs_with_errors.append('UHFQC_2') + UHFQC_2._errors = {} + print(f'Calibrating DIO on UHFQC_2.') + try: + DIO.calibrate(sender=cc, receiver=UHFQC_2, sender_dio_mode='uhfqa') + print(UHFQC_2.name, UHFQC_2._get_dio_calibration_delay(), 2) + except: + print(f'Failed DIO calibration on {UHFQC_2.name}!') + UHFQC_2._set_dio_calibration_delay(2) + UHFQC_2.clear_errors() + UHFQC_2.sigins_0_range(0.3) + UHFQC_2.sigins_1_range(0.3) + station.components['RO_lutman_2'].load_DIO_triggered_sequence_onto_UHFQC() + # UHFQC_3 + UHFQC_3.check_errors() + _errors = UHFQC_3._errors + # if 'AWGDIOTIMING' in _errors.keys(): + if _errors != {} or force: + if _errors != {}: + awgs_with_errors.append('UHFQC_3') + # no_error = False # this is commented because some holdoff error cannot be fixed + UHFQC_3._errors = {} + print(f'Calibrating DIO on UHFQC_3.') + try: + DIO.calibrate(sender=cc, receiver=UHFQC_3, sender_dio_mode='uhfqa') + print(UHFQC_3.name, UHFQC_3._get_dio_calibration_delay(), 2) + except: + print(f'Failed DIO calibration on {UHFQC_3.name}!') + UHFQC_3._set_dio_calibration_delay(2) + UHFQC_3.clear_errors() + UHFQC_3.sigins_0_range(0.5) + UHFQC_3.sigins_1_range(0.5) + station.components['RO_lutman_3'].load_DIO_triggered_sequence_onto_UHFQC() + # UHFQC_4 + UHFQC_4.check_errors() + _errors = UHFQC_4._errors + # if 'AWGDIOTIMING' in _errors.keys(): + if _errors != {} or force: + # no_error = False + if _errors != {}: + awgs_with_errors.append('UHFQC_4') + UHFQC_4._errors = {} + print(f'Calibrating DIO on UHFQC_4.') + try: + DIO.calibrate(sender=cc, receiver=UHFQC_4, sender_dio_mode='uhfqa') + print(UHFQC_4.name, UHFQC_4._get_dio_calibration_delay(), 7) + except: + print(f'Failed DIO calibration on {UHFQC_4.name}!') + UHFQC_4._set_dio_calibration_delay(7) + UHFQC_4.clear_errors() + UHFQC_4.sigins_0_range(0.4) + 
UHFQC_4.sigins_1_range(0.4) + station.components['RO_lutman_4'].load_DIO_triggered_sequence_onto_UHFQC() + + ############################################ + # MW HDAWG DIO calibration + ############################################ + # AWG8_8481 + AWG8_8481.check_errors() + _errors = AWG8_8481._errors + # if 'AWGDIOTIMING' in _errors.keys(): + if _errors != {} or force: + if _errors != {}: + awgs_with_errors.append('AWG8_8481') + no_error = False + AWG8_8481._errors = {} + print(f'Calibrating DIO on AWG8_8481.') + AWG8_8481.set('dios_0_interface', 0) + AWG8_8481.set('dios_0_interface', 1) + AWG8_8481.clear_errors() + try: + DIO.calibrate(sender=cc, receiver=AWG8_8481, sender_dio_mode='awg8-mw-direct-iq') + print(AWG8_8481.name, AWG8_8481._get_dio_calibration_delay(), 6) + except: + print(f'Failed DIO calibration on {AWG8_8481.name}!') + AWG8_8481._set_dio_calibration_delay(6) + AWG8_8481.clear_errors() + _prep_awg('AWG8_8481') + # AWG8_8068 + AWG8_8068.check_errors() + _errors = AWG8_8068._errors + # if 'AWGDIOTIMING' in _errors.keys(): + if _errors != {} or force: + if _errors != {}: + awgs_with_errors.append('AWG8_8068') + no_error = False + AWG8_8068._errors = {} + print(f'Calibrating DIO on AWG8_8068.') + AWG8_8068.set('dios_0_interface', 0) + AWG8_8068.set('dios_0_interface', 1) + AWG8_8068.clear_errors() + try: + DIO.calibrate(sender=cc, receiver=AWG8_8068, sender_dio_mode='awg8-mw-direct-iq') + print(AWG8_8068.name, AWG8_8068._get_dio_calibration_delay(), 4) + except: + print(f'Failed DIO calibration on {AWG8_8068.name}!') + AWG8_8068._set_dio_calibration_delay(4) + AWG8_8068.clear_errors() + _prep_awg('AWG8_8068') + # AWG8_8074 + AWG8_8074.check_errors() + _errors = AWG8_8074._errors + # if 'AWGDIOTIMING' in _errors.keys(): + if _errors != {} or force: + if _errors != {}: + awgs_with_errors.append('AWG8_8074') + no_error = False + AWG8_8074._errors = {} + print(f'Calibrating DIO on AWG8_8074.') + AWG8_8074.set('dios_0_interface', 0) + 
AWG8_8074.set('dios_0_interface', 1) + AWG8_8074.clear_errors() + try: + DIO.calibrate(sender=cc, receiver=AWG8_8074, sender_dio_mode='awg8-mw-direct-iq') + print(AWG8_8074.name, AWG8_8074._get_dio_calibration_delay(), 6) + except: + print(f'Failed DIO calibration on {AWG8_8074.name}!') + AWG8_8074._set_dio_calibration_delay(6) + AWG8_8074.clear_errors() + _prep_awg('AWG8_8074') + # AWG8_8076 + AWG8_8076.check_errors() + _errors = AWG8_8076._errors + # if 'AWGDIOTIMING' in _errors.keys(): + if _errors != {} or force: + if _errors != {}: + awgs_with_errors.append('AWG8_8076') + no_error = False + AWG8_8076._errors = {} + print(f'Calibrating DIO on AWG8_8076.') + AWG8_8076.set('dios_0_interface', 0) + AWG8_8076.set('dios_0_interface', 1) + AWG8_8076.clear_errors() + try: + DIO.calibrate(sender=cc, receiver=AWG8_8076, sender_dio_mode='awg8-mw-direct-iq') + print(AWG8_8076.name, AWG8_8076._get_dio_calibration_delay(), 4) + except: + print(f'Failed DIO calibration on {AWG8_8076.name}!') + AWG8_8076._set_dio_calibration_delay(4) + AWG8_8076.clear_errors() + _prep_awg('AWG8_8076') + # AWG8_8499 + AWG8_8499.check_errors() + _errors = AWG8_8499._errors + # if 'AWGDIOTIMING' in _errors.keys(): + if _errors != {} or force: + if _errors != {}: + awgs_with_errors.append('AWG8_8499') + no_error = False + AWG8_8499._errors = {} + print(f'Calibrating DIO on AWG8_8499.') + AWG8_8499.set('dios_0_interface', 0) + AWG8_8499.set('dios_0_interface', 1) + AWG8_8499.clear_errors() + try: + DIO.calibrate(sender=cc, receiver=AWG8_8499, sender_dio_mode='awg8-mw-direct-iq') + print(AWG8_8499.name, AWG8_8499._get_dio_calibration_delay(), 6) + except: + print(f'Failed DIO calibration on {AWG8_8499.name}!') + AWG8_8499._set_dio_calibration_delay(6) + AWG8_8499.clear_errors() + _prep_awg('AWG8_8499') + + ############################################ + # Flux HDAWG DIO calibration + ############################################ + # AWG8_8279 + AWG8_8279.check_errors() + _errors = AWG8_8279._errors + 
# if 'AWGDIOTIMING' in _errors.keys(): + if _errors != {} or force: + if _errors != {}: + awgs_with_errors.append('AWG8_8279') + no_error = False + AWG8_8279._errors = {} + print(f'Calibrating DIO on AWG8_8279.') + AWG8_8279.set('dios_0_interface', 0) + AWG8_8279.set('dios_0_interface', 1) + AWG8_8279.clear_errors() + try: + DIO.calibrate(sender=cc, receiver=AWG8_8279, sender_dio_mode='awg8-flux') + print(AWG8_8279.name, AWG8_8279._get_dio_calibration_delay(), 6) + except: + print(f'Failed DIO calibration on {AWG8_8279.name}!') + AWG8_8279._set_dio_calibration_delay(6) + AWG8_8279_channels = [0, 1, 2, 3, 4, 5, 6, 7] + for this_ch in AWG8_8279_channels: + AWG8_8279.setd('sigouts/%d/precompensation/enable' % (int(this_ch)), True) + AWG8_8279.setd('sigouts/%d/precompensation/exponentials/0/enable' % (int(this_ch)), True) + AWG8_8279.setd('sigouts/%d/precompensation/exponentials/1/enable' % (int(this_ch)), True) + AWG8_8279.setd('sigouts/%d/precompensation/exponentials/2/enable' % (int(this_ch)), True) + AWG8_8279.setd('sigouts/%d/precompensation/exponentials/3/enable' % (int(this_ch)), True) + AWG8_8279.setd('sigouts/%d/precompensation/exponentials/4/enable' % (int(this_ch)), True) + AWG8_8279.setd('sigouts/%d/precompensation/exponentials/5/enable' % (int(this_ch)), True) + AWG8_8279.setd('sigouts/%d/precompensation/exponentials/6/enable' % (int(this_ch)), True) + AWG8_8279.setd('sigouts/%d/precompensation/exponentials/7/enable' % (int(this_ch)), True) + AWG8_8279.setd('sigouts/%d/precompensation/fir/enable' % (int(this_ch)), True) + AWG8_8279.set('sigouts_{}_delay'.format(int(this_ch)), 0e-9 + 4 * 10 / 3 * 1e-9 - 2 * 3.33e-9) + AWG8_8279.clear_errors() + _prep_awg('AWG8_8279') + # AWG8_8320 + AWG8_8320.check_errors() + _errors = AWG8_8320._errors + # if 'AWGDIOTIMING' in _errors.keys(): + if _errors != {} or force: + if _errors != {}: + awgs_with_errors.append('AWG8_8320') + no_error = False + AWG8_8320._errors = {} + print(f'Calibrating DIO on AWG8_8320.') + 
AWG8_8320.set('dios_0_interface', 0) + AWG8_8320.set('dios_0_interface', 1) + AWG8_8320.clear_errors() + try: + DIO.calibrate(sender=cc, receiver=AWG8_8320, sender_dio_mode='awg8-flux') + print(AWG8_8320.name, AWG8_8320._get_dio_calibration_delay(), 1) + except: + print(f'Failed DIO calibration on {AWG8_8320.name}!') + AWG8_8320._set_dio_calibration_delay(1) + AWG8_8320_channels = [0, 1, 2, 3, 4, 5, 6, 7] + for this_ch in AWG8_8320_channels: + AWG8_8320.setd('sigouts/%d/precompensation/enable' % (int(this_ch)), True) + AWG8_8320.setd('sigouts/%d/precompensation/exponentials/0/enable' % (int(this_ch)), True) + AWG8_8320.setd('sigouts/%d/precompensation/exponentials/1/enable' % (int(this_ch)), True) + AWG8_8320.setd('sigouts/%d/precompensation/exponentials/2/enable' % (int(this_ch)), True) + AWG8_8320.setd('sigouts/%d/precompensation/exponentials/3/enable' % (int(this_ch)), True) + AWG8_8320.setd('sigouts/%d/precompensation/exponentials/4/enable' % (int(this_ch)), True) + AWG8_8320.setd('sigouts/%d/precompensation/exponentials/5/enable' % (int(this_ch)), True) + AWG8_8320.setd('sigouts/%d/precompensation/exponentials/6/enable' % (int(this_ch)), True) + AWG8_8320.setd('sigouts/%d/precompensation/exponentials/7/enable' % (int(this_ch)), True) + AWG8_8320.setd('sigouts/%d/precompensation/fir/enable' % (int(this_ch)), True) + AWG8_8320.set('sigouts_{}_delay'.format(int(this_ch)), 18e-9 + 2 * 3.33e-9) + AWG8_8320.clear_errors() + _prep_awg('AWG8_8320') + # AWG8_8071 + AWG8_8071.check_errors() + _errors = AWG8_8071._errors + # if 'AWGDIOTIMING' in _errors.keys(): + if _errors != {} or force: + if _errors != {}: + awgs_with_errors.append('AWG8_8071') + no_error = False + AWG8_8071._errors = {} + print(f'Calibrating DIO on AWG8_8071.') + AWG8_8071.set('dios_0_interface', 0) + AWG8_8071.set('dios_0_interface', 1) + AWG8_8071.clear_errors() + try: + DIO.calibrate(sender=cc, receiver=AWG8_8071, sender_dio_mode='awg8-flux') + print(AWG8_8071.name, 
AWG8_8071._get_dio_calibration_delay(), 6) + except: + print(f'Failed DIO calibration on {AWG8_8071.name}!') + AWG8_8071._set_dio_calibration_delay(6) + AWG8_8071_channels = [0, 1, 2, 3, 4, 5, 6, 7] + for this_ch in AWG8_8071_channels: + AWG8_8071.setd('sigouts/%d/precompensation/enable' % (int(this_ch)), True) + AWG8_8071.setd('sigouts/%d/precompensation/exponentials/0/enable' % (int(this_ch)), True) + AWG8_8071.setd('sigouts/%d/precompensation/exponentials/1/enable' % (int(this_ch)), True) + AWG8_8071.setd('sigouts/%d/precompensation/exponentials/2/enable' % (int(this_ch)), True) + AWG8_8071.setd('sigouts/%d/precompensation/exponentials/3/enable' % (int(this_ch)), True) + AWG8_8071.setd('sigouts/%d/precompensation/fir/enable' % (int(this_ch)), True) + AWG8_8071.set('sigouts_{}_delay'.format(int(this_ch)), 7e-9 - 2 * 3.33e-9) + AWG8_8071.clear_errors() + _prep_awg('AWG8_8071') + # apply the right delays + device.tim_flux_latency_0(-240e-9 - 4 * 36.67e-9) # 8320 + device.tim_flux_latency_1(-240e-9 - 4 * 36.67e-9) # 8279 + device.tim_flux_latency_2(-240e-9) # 8071 + device.tim_mw_latency_0(0) # 8076 + device.tim_mw_latency_1(-10e-9) # 8074 + device.tim_mw_latency_2(-15e-9) # 8499 + device.tim_mw_latency_3(0) # 8068 + device.tim_mw_latency_4(-10e-9) # 8481 + device.prepare_timing() + return no_error, awgs_with_errors + +############################################################################### +# LRU calibration graph +############################################################################### +class LRU_gate_calibration(AutoDepGraph_DAG): + def __init__(self, + name: str, + Qubits: str, + station, + **kwargs): + super().__init__(name, **kwargs) + self.station = station + self.create_dep_graph(Qubits=Qubits) + + def create_dep_graph(self, Qubits:str): + ''' + Dependency graph for the calibration of + single-qubit gates. 
+ ''' + print(f'Creating dependency graph for LRU gate calibration') + ############################## + # Grah nodes + ############################## + module_name = 'pycqed.instrument_drivers.meta_instrument.Surface17_dependency_graph' + for qubit in Qubits: + self.add_node(f'{qubit} Prepare for LRU calibration', + calibrate_function=module_name+'.prepare_for_LRU_calibration', + calibrate_function_args={ + 'qubit' : qubit, + 'station': self.station, + }) + + self.add_node(f'{qubit} Sweep LRU Frequency', + calibrate_function=module_name+'.LRU_frequency_wrapper', + calibrate_function_args={ + 'qubit' : qubit, + 'station': self.station, + }) + + self.add_node(f'{qubit} LRU drive mixer calibration', + calibrate_function=module_name+'.LRU_mixer_offset_wrapper', + calibrate_function_args={ + 'qubit' : qubit, + 'station': self.station, + }) + ############################## + # Node depdendencies + ############################## + self.add_edge(f'{qubit} Sweep LRU Frequency', + f'{qubit} Prepare for LRU calibration') + + self.add_edge(f'{qubit} LRU drive mixer calibration', + f'{qubit} Sweep LRU Frequency') + # Add master node that saves snapshot + self.add_node(f'Save snapshot', + calibrate_function=module_name+'.save_snapshot_metadata', + calibrate_function_args={ + 'station': self.station, + }) + for qubit in Qubits: + self.add_edge(f'Save snapshot', + f'{qubit} LRU drive mixer calibration') + # Add dependencies between qubits + # D1 and D3 share the AWG channel from D4 + # self.add_edge(f'D1 Prepare for LRU calibration', + # f'D4 LRU drive mixer calibration') + # self.add_edge(f'D3 Prepare for LRU calibration', + # f'D4 LRU drive mixer calibration') + # # D8 and D9 share the AWG channel from D4 + # self.add_edge(f'D8 Prepare for LRU calibration', + # f'D5 LRU drive mixer calibration') + # self.add_edge(f'D9 Prepare for LRU calibration', + # f'D5 LRU drive mixer calibration') + # # D2 and D7 share the AWG channel from D6 + # self.add_edge(f'D2 Prepare for LRU 
calibration', + # f'D6 LRU drive mixer calibration') + # self.add_edge(f'D7 Prepare for LRU calibration', + # f'D6 LRU drive mixer calibration') + + ############################## + # Create graph + ############################## + self.cfg_plot_mode = 'svg' + self.update_monitor() + self.cfg_svg_filename + url = self.open_html_viewer() + print('Dependency graph created at ' + url) + + +def prepare_for_LRU_calibration(qubit:str, station): + ''' + Initial function to prepare qubit for calibration. + We will set all relevant parameters for mw and readout. + This is such that we only perform full preparation of + the qubit once in the graph and all additional calibrated + parameters are uploaded individually making the whole + procedure time efficient. + ''' + Q_inst = station.components[qubit] + # Dictionary with LRU amplitude parameters + LRU_param_dict = { + # High frequency qubits + 'D4': {'ch_range': 5, 'ch_amp': 0.95}, + 'D5': {'ch_range': 3, 'ch_amp': 1.00}, + 'D6': {'ch_range': 5, 'ch_amp': 0.80}, + # 'D4': {'ch_range': 3, 'ch_amp': 1.00}, + # 'D5': {'ch_range': 3, 'ch_amp': 1.00}, + # 'D6': {'ch_range': 3, 'ch_amp': 1.00}, + # Low frequency qubits (these parameters are not used) + 'D1': {'ch_range': .8, 'ch_amp': 1}, + 'D2': {'ch_range': .8, 'ch_amp': 1}, + 'D3': {'ch_range': .8, 'ch_amp': 1}, + 'D7': {'ch_range': .8, 'ch_amp': 1}, + 'D8': {'ch_range': .8, 'ch_amp': 1}, + 'D9': {'ch_range': .8, 'ch_amp': 1}, + # Mid frequency qubits + 'Z1': {'ch_range': 5, 'ch_amp': 0.85}, + 'Z2': {'ch_range': 5, 'ch_amp': 0.60}, + 'Z3': {'ch_range': 5, 'ch_amp': 0.60}, + 'Z4': {'ch_range': 5, 'ch_amp': 0.80}, + 'X1': {'ch_range': 5, 'ch_amp': 0.50}, + 'X2': {'ch_range': 5, 'ch_amp': 0.50}, + 'X3': {'ch_range': 5, 'ch_amp': 0.50}, + 'X4': {'ch_range': 5, 'ch_amp': 0.50}, + } + ############################################ + # Set initial parameters for calibration + ############################################ + Q_inst.ro_acq_averages(2**10) + Q_inst.ro_soft_avg(1) + 
Q_inst.ro_acq_weight_type('optimal IQ') + Q_inst.ro_acq_digitized(False) + station.components['MC'].live_plot_enabled(False) + station.components['nested_MC'].live_plot_enabled(False) + # Check if RO pulse has been uploaded onto UHF + # (We do this by checking if the resonator + # combinations of the RO lutman contain + # exclusively this qubit). + RO_lm = Q_inst.instr_LutMan_RO.get_instr() + _res_combs = RO_lm.resonator_combinations() + if _res_combs != [[Q_inst.cfg_qubit_nr()]]: + Q_inst.prepare_readout() + else: + # Just update detector functions (for avg and IQ) + Q_inst._prep_ro_instantiate_detectors() + # Set microwave lutman + Q_lm = Q_inst.instr_LutMan_MW.get_instr() + Q_lm.set_default_lutmap() + ############################################ + # Set LRU parameters + ############################################ + Q_inst.LRU_duration(220e-9) + Q_inst.LRU_duration_rise(30e-9) + Q_inst.LRU_amplitude(1) + Q_inst.LRU_channel_amp(LRU_param_dict[qubit]['ch_amp']) + Q_inst.LRU_channel_range(LRU_param_dict[qubit]['ch_range']) + # Set LRU LO powers + # High frequency + station.components['MW_LO_6'].power(25) # [D4, D5, D6] + station.components['MW_LO_6'].frequency(5.192e9) # + # Mid frequency + station.components['MW_LO_7'].power(25) # [Z2, Z3, Z4, X3] + station.components['MW_LO_7'].frequency(3.888e9) # + station.components['MW_LO_10'].power(25) # [Z1, X4] + station.components['MW_LO_10'].frequency(4.095e9) # + # station.components['MW_LO_15'].power(15) # X2 + # station.components['MW_LO_15'].power(15) # X1 + # # Low frequency + # station.components['MW_LO_11'].power(6) # D1 + # station.components['MW_LO_9'].power(8) # D2 + # station.components['MW_LO_12'].power(10) # D3 + # station.components['MW_LO_8'].power(10) # D7 + # station.components['MW_LO_14'].power(10) # D8 + # station.components['MW_LO_13'].power(9) # D9 + ############################################ + # Prepare for timedomain + ############################################ + # For low frequency qubits that 
share AWG channels + if Q_inst.name in ['D1', 'D3']: + station.components['D4']._prep_td_sources() + station.components['D4']._prep_LRU_pulses() + if Q_inst.name in ['D8', 'D9']: + station.components['D5']._prep_td_sources() + station.components['D5']._prep_LRU_pulses() + if Q_inst.name in ['D2', 'D7']: + station.components['D6']._prep_td_sources() + station.components['D6']._prep_LRU_pulses() + # Prepare qubit + Q_inst._prep_td_sources() + Q_inst._prep_mw_pulses() + if Q_inst.instr_LutMan_LRU(): + Q_inst._prep_LRU_pulses() + return True + + +def LRU_frequency_wrapper(qubit:str, station): + ''' + Wrapper function around LRU frequency sweep calibration. + Returns True if successful calibration otherwise + returns False. + ''' + Q_inst = station.components[qubit] + station.components['MC'].live_plot_enabled(False) + station.components['nested_MC'].live_plot_enabled(False) + # Set initial parameters for calibration + Q_inst.ro_soft_avg(1) + Q_inst.ro_acq_weight_type('optimal IQ') + Q_inst._prep_ro_instantiate_detectors() + # Run experiment + outcome = Q_inst.calibrate_LRU_frequency( + frequencies = np.linspace(-30e6, 30e6, 121)+Q_inst.LRU_freq(), + nr_shots_per_point=2**10, + update=True, + prepare_for_timedomain=False, + disable_metadata=True) + return outcome + + +def LRU_mixer_offset_wrapper(qubit:str, station): + ''' + Wrapper function around LRU mixer offset calibration. + Returns True if successful calibration otherwise + returns False. 
+ ''' + Q_inst = station.components[qubit] + station.components['MC'].live_plot_enabled(False) + station.components['nested_MC'].live_plot_enabled(False) + # Set initial parameters for calibration + Q_inst.ro_soft_avg(1) + Q_inst._prep_td_sources() + if Q_inst.instr_LutMan_LRU(): + Q_inst._prep_LRU_pulses() + # Run experiment + if Q_inst.name in ['D4', 'D5', 'D6']: + connect(f'{Q_inst.name}_LRU') + outcome = Q_inst.calibrate_mixer_offsets_LRU( + update=True, + ftarget=-110, + disable_metadata=True) + # If low frequency qubits perform only single channel mixer calibration + elif Q_inst.name in ['D1', 'D2', 'D3', 'D7', 'D8', 'D9']: + outcome = Q_inst.calibrate_mixer_offset_LRU_single_channel(prepare = False, + currents = np.linspace(-10e-3, 5e-3, 21), + disable_metadata=True, + adaptive_sampling = True, + ch_par = station.components['LRUcurrent'].parameters[f'LRU_{Q_inst.name}']) + # This was not implemented yet + elif Q_inst.name in ['Z1', 'Z2', 'Z3', 'Z4', 'X1', 'X2', 'X3', 'X4']: + outcome = True + return outcome + + +def measure_LRU_wrapper(qubit:str, station): + ''' + Wrapper function around LRU measurement. + ''' + Q_inst = station.components[qubit] + station.components['MC'].live_plot_enabled(False) + station.components['nested_MC'].live_plot_enabled(False) + # Set parameters for measurement + Q_inst.ro_soft_avg(1) + Q_inst.ro_acq_averages(2**12) + Q_inst.ro_acq_weight_type('optimal IQ') + Q_inst.ro_acq_digitized(False) + # Check if RO pulse has been uploaded onto UHF + # (We do this by checking if the resonator + # combinations of the RO lutman contain + # exclusively this qubit). 
+ RO_lm = Q_inst.instr_LutMan_RO.get_instr() + _res_combs = RO_lm.resonator_combinations() + if _res_combs != [[Q_inst.cfg_qubit_nr()]]: + Q_inst.prepare_readout() + else: + # Just update detector functions (for avg and IQ) + Q_inst._prep_ro_instantiate_detectors() + # Set microwave lutman + Q_lm = Q_inst.instr_LutMan_MW.get_instr() + Q_lm.set_default_lutmap() + # upload lru offset params + Q_inst._prep_td_sources() + Q_inst._prep_mw_pulses() + if Q_inst.instr_LutMan_LRU(): + Q_inst._prep_LRU_pulses() + outcome = Q_inst.measure_LRU_experiment(prepare_for_timedomain = False, + disable_metadata = True) + return outcome diff --git a/pycqed/instrument_drivers/meta_instrument/Z3_D7_unipolar_calibration.py b/pycqed/instrument_drivers/meta_instrument/Z3_D7_unipolar_calibration.py new file mode 100644 index 0000000000..bfc1d8052f --- /dev/null +++ b/pycqed/instrument_drivers/meta_instrument/Z3_D7_unipolar_calibration.py @@ -0,0 +1,207 @@ +################################################################### +# Spectroscopy experiments +#################################################################### +MC.live_plot_enabled(True) +nested_MC.live_plot_enabled(True) +# Apply spectroscopy settings +file_cfg = gc.generate_config(in_filename=input_file, + out_filename=config_fn, + mw_pulse_duration=20, + ro_duration=3000, + flux_pulse_duration=40, + init_duration=200000) +qubit = X3 +qubit.ro_acq_averages(2**8) +qubit.ro_pulse_length(2e-6) +qubit.ro_acq_integration_length(2e-6) +qubit.instr_spec_source(qubit.instr_LO_mw()) +qubit.instr_FluxCtrl('AWG8_8320') +qubit.fl_dc_ch('sigouts_3_offset') + + +# Resonator spectroscocpy +qubit.find_resonator_frequency(update=True,use_min=True) + +# Perform spectroscopy experimentsq +qubit.measure_qubit_frequency_dac_scan(freqs=np.arange(-30e6,30e6,0.25e6)+qubit.freq_qubit(), + dac_values=np.linspace(-10e-3, 10e-3, 11)+qubit.fl_dc_I0(), mode='CW', update=False) +qubit.find_frequency(disable_metadata=True, spec_mode='CW', + freqs = 
np.arange(-50e6, 50e6, 0.25e6)+qubit.freq_qubit()) + +# # Select frequency detuning based on measured flux arc +# for detuning in np.arange(0, 200e6, 20e6): +# freq_qubit = 6.055e9-detuning +# current = np.max((a.dac_fit_res['fit_polynomial']-freq_qubit).roots) +# fluxcurrent.FBL_X4(np.real(current)) +# qubit.find_frequency(disable_metadata=True, spec_mode='CW', f_step=.1e6) + +# Calibrate single qubit gate +# Z3.calibrate_mw_pulse_amplitude_coarse() +S17GBT.Flipping_wrapper(qubit=Z3.name, station=station) +Z3.calibrate_frequency_ramsey(disable_metadata=True) +S17GBT.Motzoi_wrapper(qubit=Z3.name, station=station) +S17GBT.Flipping_wrapper(qubit=Z3.name, station=station) +S17GBT.AllXY_wrapper(qubit=Z3.name, station=station) +S17GBT.SSRO_wrapper(qubit=Z3.name, station=station) +S17GBT.T1_wrapper(qubit=Z3.name, station=station) +S17GBT.T2_wrapper(qubit=Z3.name, station=station) +S17GBT.Randomized_benchmarking_wrapper(qubit=Z3.name, station=station) + + +# Recover timedomain settings +Z3.ro_pulse_length(250e-9) +Z3.ro_acq_integration_length(500e-9) +Z3.ro_freq(7.599e9) +file_cfg = gc.generate_config(in_filename=input_file, + out_filename=config_fn, + mw_pulse_duration=20, + ro_duration=600, + flux_pulse_duration=80, + init_duration=200000) +MW_LO_1.frequency(6.200e9) +MW_LO_2.frequency(6.140e9) +MW_LO_3.frequency(5.090e9) +MW_LO_4.frequency(5070000000.0-30e6) +MW_LO_5.frequency(6.950e9) +Z3.prepare_for_timedomain() + + +# Reset filters +lin_dist_kern_Z3.reset_kernels() +device.prepare_for_timedomain(qubits=['Z3']) +# Measure cryoscope +S17GBT.Flux_arc_wrapper(Qubit='Z3', station=station) +S17GBT.Cryoscope_wrapper(Qubit='Z3', station=station, max_duration=500e-9) + +# Set filters manually +reload(ma2) +a = ma2.cv2.multi_qubit_cryoscope_analysis( + label='Cryoscope', + update_FIRs=False, + update_IIRs=True, + extract_only=False) + +for qubit, fltr in a.proc_data_dict['exponential_filter'].items(): + lin_dist_kern = device.find_instrument(f'lin_dist_kern_{qubit}') + 
filter_dict = {'params': fltr, + 'model': 'exponential', 'real-time': True } + # Check wich is the first empty exponential filter + for i in range(4): + _fltr = lin_dist_kern.get(f'filter_model_0{i}') + if _fltr == {}: + lin_dist_kern.set(f'filter_model_0{i}', filter_dict) + # return True + break + else: + print(f'filter_model_0{i} used.') +# print('All exponential filter tabs are full. Filter not updated.') + + + +flux_lm_Z3.vcz_use_net_zero_pulse_NE(False) +flux_lm_Z3.vcz_use_asymmetric_amp_NE(False) +flux_lm_Z3.vcz_asymmetry_NE(0) +S17GBT.Calibrate_CZ_gate( + qH='Z3', qL='D7', station=station, + park_distance = 700e6, + apply_parking_settings = True, + tmid_offset_samples = 5, + calibration_type = 'full', + benchmark = True, + recompile=True, + live_plot_enabled=True, + asymmetry_compensation = False, + calibrate_asymmetry = False, + ) + +flux_lm_Z3.vcz_use_net_zero_pulse_NE(False) +S17GBT.Single_qubit_phase_calibration_wrapper(qH='Z3', qL='D7', + station=station, fine_cphase_calibration=False) + +flux_lm_Z3.vcz_use_net_zero_pulse_NE(True) +S17GBT.Single_qubit_phase_calibration_wrapper(qH='Z3', qL='D7', + station=station, fine_cphase_calibration=False) + +S17GBT.TwoQ_Randomized_benchmarking_wrapper(qH='Z3', qL='D7', + station=station, recompile=False) + +############################################## +# Add 6 dB attn +############################################## +gen.load_settings_onto_instrument_v2(lin_dist_kern_Z3, timestamp='20230830_104942') +bias = 0.1601765900850296 +Z3.instr_FluxCtrl('AWG8_8279') +Z3.fl_dc_ch('sigouts_4_offset') +Z3.instr_FluxCtrl.get_instr().set(Z3.fl_dc_ch(), bias) +# Measure cryoscope +flux_lm_Z3.q_polycoeffs_freq_01_det(np.array([ 8.07411150e+09, -3.21463324e+06, -5.55337208e+06])) +S17GBT.Flux_arc_wrapper(Qubit='Z3', station=station) +# S17GBT.Cryoscope_wrapper(Qubit='Z3', station=station, max_duration=500e-9) + +S17GBT.Single_qubit_phase_calibration_wrapper(qH='Z3', qL='D7', + station=station, fine_cphase_calibration=False) 
+S17GBT.TwoQ_Randomized_benchmarking_wrapper(qH='Z3', qL='D7', + station=station, recompile=True) + + + + +for i in range(5): + S17GBT.Cryoscope_wrapper(Qubit='Z3', station=station, + max_duration=60e-9, + ro_acq_averages = 2**12) + a = ma2.cv2.multi_qubit_cryoscope_analysis( + update_FIRs=False, + update_IIRs=False, + extract_only=False, + derivative_window_length=2e-9) + a = ma2.cv2.multi_qubit_cryoscope_analysis( + update_FIRs=True, + update_IIRs=False, + extract_only=False, + derivative_window_length=2e-9) + filter_dict = {'params': {'weights': a.proc_data_dict['conv_filters']['Z3']}, + 'model': 'FIR', 'real-time': True } + lin_dist_kern_Z3.filter_model_04(filter_dict) + +S17GBT.Calibrate_CZ_gate( + qH='Z3', qL='D7', station=station, + park_distance = 700e6, + apply_parking_settings = True, + tmid_offset_samples = 5, + calibration_type = 'AB fine', + benchmark = True, + recompile=True, + live_plot_enabled=True, + asymmetry_compensation = False, + calibrate_asymmetry = False, + ) + + + +Qubits = [ + #'D1', 'D2', 'D3', + #'D4', 'D5', 'D6', + #'D7', + #'D8', + #'D9', + #'Z1', 'Z2', 'Z3', 'Z4', + #'X1', + #'X2', + 'X3', 'X4', + ] +# Run single-qubit calibration graph +t_SQG = time.time() +for q in Qubits: + device.find_instrument(q).calibrate_optimal_weights(disable_metadata=True) + device.find_instrument(q).calibrate_frequency_ramsey(disable_metadata=True) + S17GBT.Flipping_wrapper(qubit=q, station=station) + S17GBT.Motzoi_wrapper(qubit=q, station=station) + S17GBT.Flipping_wrapper(qubit=q, station=station) + S17GBT.AllXY_wrapper(qubit=q, station=station) + S17GBT.SSRO_wrapper(qubit=q, station=station) + S17GBT.T1_wrapper(qubit=q, station=station) + S17GBT.T2_wrapper(qubit=q, station=station) + S17GBT.Randomized_benchmarking_wrapper(qubit = q, station=station) +S17GBT.save_snapshot_metadata(station=station) +t_SQG = time.time()-t_SQG \ No newline at end of file diff --git a/pycqed/instrument_drivers/meta_instrument/device_object_CCL.py 
b/pycqed/instrument_drivers/meta_instrument/device_object_CCL.py index f1f8843901..0b54534215 100644 --- a/pycqed/instrument_drivers/meta_instrument/device_object_CCL.py +++ b/pycqed/instrument_drivers/meta_instrument/device_object_CCL.py @@ -1,6 +1,6481 @@ -""" -This file provides compatibility for existing code. The functionality of this file had been moved to HAL_Device.py -""" -# these imports just rename the new names to the legacy names -from .HAL_Device import HAL_Device as DeviceCCL -from .HAL.HAL_ShimMQ import _acq_ch_map_to_IQ_ch_map \ No newline at end of file +import numpy as np +import time +import logging +import warnings +import adaptive +import networkx as nx +import datetime +from collections import OrderedDict +import multiprocessing +from importlib import reload +from typing import List, Union, Tuple, Optional +import itertools as itt +from math import ceil + +from qcodes.instrument.base import Instrument +from qcodes.utils import validators as vals +from qcodes.instrument.parameter import ( + ManualParameter, + InstrumentRefParameter, + Parameter, +) +from qce_circuit.language.intrf_declarative_circuit import ( + InitialStateContainer, + InitialStateEnum, +) + +from pycqed.analysis import multiplexed_RO_analysis as mra +from pycqed.measurement import detector_functions as det +reload(det) +from pycqed.measurement import cz_cost_functions as cf +reload(cf) + +from pycqed.measurement import sweep_functions as swf +from pycqed.analysis import measurement_analysis as ma +from pycqed.analysis import analysis_toolbox as a_tools +from pycqed.analysis import tomography as tomo +from pycqed.analysis_v2 import measurement_analysis as ma2 +from pycqed.analysis_v2.repeated_stabilizer_analysis import RepeatedStabilizerAnalysis +from pycqed.utilities.general import ( + check_keyboard_interrupt, + print_exception, + get_gate_directions, + get_frequency_waveform, + get_DAC_amp_frequency, + get_Ch_amp_frequency, + get_parking_qubits, +) + +from 
pycqed.instrument_drivers.physical_instruments.QuTech_AWG_Module import (
    QuTech_AWG_Module,
)
from pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_HDAWG8_LongCryoscope import ZI_HDAWG8_LongCryoscope
from pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_HDAWG8 import ZI_HDAWG8
# from pycqed.instrument_drivers.physical_instruments.QuTech_CCL import CCL
# from pycqed.instrument_drivers.physical_instruments.QuTech_QCC import QCC
from pycqed.instrument_drivers.physical_instruments.QuTech.CC import CC
import pycqed.analysis_v2.tomography_2q_v2 as tomo_v2

from pycqed.utilities import learner1D_minimizer as l1dm
from pycqed.utilities import learnerND_minimizer as lndm

log = logging.getLogger(__name__)

# OpenQL (and the experiment modules generated with it) is an optional
# dependency: if the import fails, fall back to None placeholders so this
# module can still be imported without OpenQL installed.
try:
    from pycqed.measurement.openql_experiments import single_qubit_oql as sqo
    import pycqed.measurement.openql_experiments.multi_qubit_oql as mqo
    from pycqed.measurement.openql_experiments import clifford_rb_oql as cl_oql
    from pycqed.measurement.openql_experiments import openql_helpers as oqh
    from pycqed.measurement import cz_cost_functions as czcf

    reload(sqo)
    reload(mqo)
    reload(cl_oql)
    reload(oqh)
    reload(czcf)
except ImportError:
    log.warning('Could not import OpenQL')
    mqo = None
    sqo = None
    cl_oql = None
    oqh = None
    czcf = None


def _acq_ch_map_to_IQ_ch_map(acq_ch_map):
    # Expand a one-weight-per-qubit acquisition-channel map
    # ({acq_instr: {qubit: ch}}) into an I/Q map: each qubit's channel
    # `ch` becomes '<qubit> I' -> ch and '<qubit> Q' -> ch + 1.
    # The input mapping is not modified.
    acq_ch_map_IQ = {}
    for acq_instr, ch_map in acq_ch_map.items():
        acq_ch_map_IQ[acq_instr] = {}
        for qubit, ch in ch_map.items():
            acq_ch_map_IQ[acq_instr]["{} I".format(qubit)] = ch
            acq_ch_map_IQ[acq_instr]["{} Q".format(qubit)] = ch + 1
    return acq_ch_map_IQ


class DeviceCCL(Instrument):
    """
    Device object for systems controlled using the
    CCLight (CCL), QuMa based CC (QCC) or Distributed CC (CC).
+ FIXME: class name is outdated + """ + def __init__(self, name, **kw): + super().__init__(name, **kw) + + self.msmt_suffix = '_' + name + + self.add_parameter( + 'qubits', + parameter_class=ManualParameter, + initial_value=[], + vals=vals.Lists(elt_validator=vals.Strings()) + ) + + self.add_parameter( + 'qubit_edges', + parameter_class=ManualParameter, + docstring="Denotes edges that connect qubits. " + "Used to define the device topology, needed for two qubit gates.", + initial_value=[[]], + vals=vals.Lists(elt_validator=vals.Lists(elt_validator=vals.Strings())) + ) + + self.add_parameter( + 'qubits_by_feedline', + parameter_class=ManualParameter, + docstring="Nested list of qubits as divided by feedline." + "Used to sort qubits for readout preparation.", + initial_value=[[]], + vals=vals.Lists(elt_validator=vals.Lists(elt_validator=vals.Strings())) + ) + + self.add_parameter( + 'ro_lo_freq', + unit='Hz', + docstring='Frequency of the common LO for all RO pulses.', + parameter_class=ManualParameter + ) + + # actually, it should be possible to build the integration + # weights obeying different settings for different + # qubits, but for now we use a fixed common value. + self.add_parameter( + "ro_acq_integration_length", + initial_value=500e-9, + vals=vals.Numbers(min_value=0, max_value=20e6), + parameter_class=ManualParameter, + ) + + self.add_parameter( + "ro_pow_LO", + label="RO power LO", + unit="dBm", + initial_value=20, + parameter_class=ManualParameter, + ) + self.add_parameter( + "ro_acq_averages", + initial_value=1024, + vals=vals.Numbers(min_value=0, max_value=1e6), + parameter_class=ManualParameter, + ) + + self.add_parameter( + "ro_acq_delay", + unit="s", + label="Readout acquisition delay", + vals=vals.Numbers(min_value=0), + initial_value=0, + parameter_class=ManualParameter, + docstring=( + "The time between the instruction that trigger the" + " readout pulse and the instruction that triggers the " + "acquisition. 
The positive number means that the " + "acquisition is started after the pulse is send." + ), + ) + + self.add_parameter( + "instr_MC", + label="MeasurementControl", + parameter_class=InstrumentRefParameter,) + self.add_parameter('instr_nested_MC', + label='Nested MeasurementControl', + parameter_class=InstrumentRefParameter) + + self.add_parameter( + "instr_VSM", + label="Vector Switch Matrix", + parameter_class=InstrumentRefParameter, + ) + self.add_parameter( + "instr_CC", + label="Central Controller", + docstring=( + "Device responsible for controlling the experiment" + " using eQASM generated using OpenQL, in the near" + " future will be the CC_Light." + ), + parameter_class=InstrumentRefParameter, + ) + + for i in range(3): # S17 has 3 feedlines + self.add_parameter( + "instr_acq_{}".format(i), parameter_class=InstrumentRefParameter + ) + # Two microwave AWGs are used for S17 + self.add_parameter("instr_AWG_mw_0", parameter_class=InstrumentRefParameter) + self.add_parameter("instr_AWG_mw_1", parameter_class=InstrumentRefParameter) + self.add_parameter("instr_AWG_mw_2", parameter_class=InstrumentRefParameter) + self.add_parameter("instr_AWG_mw_3", parameter_class=InstrumentRefParameter) + self.add_parameter("instr_AWG_mw_4", parameter_class=InstrumentRefParameter) + + self.add_parameter("instr_AWG_flux_0", parameter_class=InstrumentRefParameter) + self.add_parameter("instr_AWG_flux_1", parameter_class=InstrumentRefParameter) + self.add_parameter("instr_AWG_flux_2", parameter_class=InstrumentRefParameter) + + ro_acq_docstr = ( + "Determines what type of integration weights to use: " + "\n\t SSB: Single sideband demodulation\n\t" + 'optimal: waveforms specified in "RO_acq_weight_func_I" ' + '\n\tand "RO_acq_weight_func_Q"' + ) + + self.add_parameter( + "ro_acq_weight_type", + initial_value="SSB", + vals=vals.Enum("SSB", "optimal", "optimal IQ", "custom"), + docstring=ro_acq_docstr, + parameter_class=ManualParameter, + ) + + self.add_parameter( + 
"ro_acq_digitized", + vals=vals.Bool(), + initial_value=False, + parameter_class=ManualParameter, + ) + + self.add_parameter( + "cfg_openql_platform_fn", + label="OpenQL platform configuration filename", + parameter_class=ManualParameter, + vals=vals.Strings(), + ) + + self.add_parameter( + "ro_always_all", + docstring="If true, configures the UHFQC to RO all qubits " + "independent of codeword received.", + parameter_class=ManualParameter, + vals=vals.Bool(), + ) + + # Timing related parameters + self.add_parameter( + "tim_ro_latency_0", + unit="s", + label="Readout latency 0", + parameter_class=ManualParameter, + initial_value=0, + vals=vals.Numbers(), + ) + self.add_parameter( + "tim_ro_latency_1", + unit="s", + label="Readout latency 1", + parameter_class=ManualParameter, + initial_value=0, + vals=vals.Numbers(), + ) + self.add_parameter( + "tim_ro_latency_2", + unit="s", + label="Readout latency 2", + parameter_class=ManualParameter, + initial_value=0, + vals=vals.Numbers(), + ) + self.add_parameter( + "tim_ro_latency_3", + unit="s", + label="Readout latency 2", + parameter_class=ManualParameter, + initial_value=0, + vals=vals.Numbers(), + ) + self.add_parameter( + "tim_flux_latency_0", + unit="s", + label="Flux latency 0", + parameter_class=ManualParameter, + initial_value=0, + vals=vals.Numbers(), + ) + self.add_parameter( + "tim_flux_latency_1", + unit="s", + label="Flux latency 1", + parameter_class=ManualParameter, + initial_value=0, + vals=vals.Numbers(), + ) + self.add_parameter( + "tim_flux_latency_2", + unit="s", + label="Flux latency 2", + parameter_class=ManualParameter, + initial_value=0, + vals=vals.Numbers(), + ) + self.add_parameter( + "tim_mw_latency_0", + unit="s", + label="Microwave latency 0", + parameter_class=ManualParameter, + initial_value=0, + vals=vals.Numbers(), + ) + self.add_parameter( + "tim_mw_latency_1", + unit="s", + label="Microwave latency 1", + parameter_class=ManualParameter, + initial_value=0, + vals=vals.Numbers(), + ) + 
self.add_parameter( + "tim_mw_latency_2", + unit="s", + label="Microwave latency 2", + parameter_class=ManualParameter, + initial_value=0, + vals=vals.Numbers(), + ) + self.add_parameter( + "tim_mw_latency_3", + unit="s", + label="Microwave latency 3", + parameter_class=ManualParameter, + initial_value=0, + vals=vals.Numbers(), + ) + self.add_parameter( + "tim_mw_latency_4", + unit="s", + label="Microwave latency 4", + parameter_class=ManualParameter, + initial_value=0, + vals=vals.Numbers(), + ) + + self.add_parameter( + "dio_map", + docstring="The map between DIO" + " channel number and functionality (ro_x, mw_x, flux_x). " + "From 2020-03-19 on, Requires to be configured by the user in each set up. " + "For convenience here are the mapping for the devices with fixed mappings:\n" + "CCL:\n" + " {\n" + " 'ro_0': 1,\n" + " 'ro_1': 2,\n" + " 'flux_0': 3,\n" + " 'mw_0': 4,\n" + " 'mw_1': 5\n" + " }\n" + "QCC:\n" + " {\n" + " 'ro_0': 1,\n" + " 'ro_1': 2,\n" + " 'ro_2': 3,\n" + " 'mw_0': 4,\n" + " 'mw_1': 5,\n" + " 'flux_0': 6,\n" + " 'flux_1': 7,\n" + " 'flux_2': 8,\n" + " 'flux_3': 9,\n" + " 'mw_2': 10,\n" + " 'mw_3': 11\n" + " 'mw_4': 12\n" + " }\n" + "Tip: run `device.dio_map?` to print the docstring of this parameter", + initial_value=None, + set_cmd=self._set_dio_map, + vals=vals.Dict(), + ) + + def _set_dio_map(self, dio_map_dict): + allowed_keys = {"ro_", "mw_", "flux_"} + for key in dio_map_dict: + assert np.any( + [a_key in key and len(key) > len(a_key) for a_key in allowed_keys] + ), "Key `{}` must start with:" " `{}`!".format(key, list(allowed_keys)) + return dio_map_dict + + def _grab_instruments_from_qb(self): + """ + initialize instruments that should only exist once from the first + qubit. Maybe must be done in a more elegant way (at least check + uniqueness). 
+ """ + + qb = self.find_instrument(self.qubits()[0]) + self.instr_MC(qb.instr_MC()) + self.instr_VSM(qb.instr_VSM()) + self.instr_CC(qb.instr_CC()) + self.cfg_openql_platform_fn(qb.cfg_openql_platform_fn()) + + def prepare_timing(self): + """ + Responsible for ensuring timing is configured correctly. + Takes parameters starting with `tim_` and uses them to set the correct + latencies on the DIO ports of the CCL or QCC. + N.B. latencies are set in multiples of 20ns in the DIO. + Latencies shorter than 20ns are set as channel delays in the AWGs. + These are set globally. If individual (per channel) setting of latency + is required in the future, we can add this. + """ + # 2. Setting the latencies + cc = self.instr_CC.get_instr() + if cc.IDN()['model']=='CCL': + latencies = OrderedDict( + [ + ("ro_0", self.tim_ro_latency_0()), + ("ro_1", self.tim_ro_latency_1()), + # ('ro_2', self.tim_ro_latency_2()), + ("mw_0", self.tim_mw_latency_0()), + ("mw_1", self.tim_mw_latency_1()), + ("flux_0", self.tim_flux_latency_0()) + # ('flux_1', self.tim_flux_latency_1()), + # ('flux_2', self.tim_flux_latency_2()), + # ('mw_2', self.tim_mw_latency_2()), + # ('mw_3', self.tim_mw_latency_3()), + # ('mw_4', self.tim_mw_latency_4())] + ] + ) + else: + latencies = OrderedDict( + [ + ("ro_0", self.tim_ro_latency_0()), + ("ro_1", self.tim_ro_latency_1()), + ("ro_2", self.tim_ro_latency_2()), + ("ro_3", self.tim_ro_latency_3()), + ("flux_0", self.tim_flux_latency_0()), + ("flux_1", self.tim_flux_latency_1()), + ("flux_2", self.tim_flux_latency_2()), + ("mw_0", self.tim_mw_latency_0()), + ("mw_1", self.tim_mw_latency_1()), + ("mw_2", self.tim_mw_latency_2()), + ("mw_3", self.tim_mw_latency_3()), + ("mw_4", self.tim_mw_latency_4()), + ] + ) + + # NB: Mind that here number precision matters a lot! + # Tripple check everything if any changes are to be made + + # Substract lowest value to ensure minimal latency is used. 
+ # note that this also supports negative delays (which is useful for + # calibrating) + lowest_value = min(latencies.values()) + for key, val in latencies.items(): + # Align to minimum and change to ns to avoid number precision problems + # The individual multiplications are on purpose + latencies[key] = val * 1e9 - lowest_value * 1e9 + + # Only apply fine latencies above 1 ps (HDAWG8 minimum fine delay) + ns_tol = 1e-3 + + # ensuring that RO latency is a multiple of 20 ns as the UHFQC does + # not have a fine timing control. + ro_latency_modulo_20 = latencies["ro_0"] % 20 + # `% 20` is for the case ro_latency_modulo_20 == 20 ns + correction_for_multiple = (20 - ro_latency_modulo_20) % 20 + if correction_for_multiple >= ns_tol: # at least one 1 ps + # Only apply corrections if they are significant + for key, val in latencies.items(): + latencies[key] = val + correction_for_multiple + + # Setting the latencies in the CCL + # Iterate over keys in dio_map as this ensures only relevant + # timing setting are set. + for lat_key, dio_ch in self.dio_map().items(): + lat = latencies[lat_key] + lat_coarse = int(np.round(lat) // 20) # Convert to CC dio value + lat_fine = lat % 20 + lat_fine = lat_fine * 1e-9 if lat_fine <= 20 - ns_tol else 0 + log.debug( + "Setting `dio{}_out_delay` for `{}` to `{}`. (lat_fine: {:4g})".format( + dio_ch, lat_key, lat_coarse, lat_fine + ) + ) + cc.set("dio{}_out_delay".format(dio_ch), lat_coarse) + + # RO devices do not support fine delay setting. 
+ if "mw" in lat_key: + # Check name to prevent crash when instrument not specified + AWG_name = self.get("instr_AWG_{}".format(lat_key)) + + if AWG_name is not None: + AWG = self.find_instrument(AWG_name) + using_QWG = AWG.__class__.__name__ == "QuTech_AWG_Module" + if not using_QWG: + AWG.stop() + for qubit in self.qubits(): + q_obj = self.find_instrument(qubit) + MW_lm = self.find_instrument(q_obj.instr_LutMan_MW()) + if AWG_name == MW_lm.AWG(): + extra_delay = q_obj.mw_fine_delay() + awg_chs = MW_lm.channel_I(), MW_lm.channel_Q() + log.debug("Setting `sigouts_{}_delay` to {:4g}" + " in {}".format(awg_chs[0], lat_fine, AWG.name)) + AWG.set("sigouts_{}_delay".format(awg_chs[0]-1), lat_fine+extra_delay) + AWG.set("sigouts_{}_delay".format(awg_chs[1]-1), lat_fine+extra_delay) + if self.find_instrument(qubit).instr_LutMan_LRU(): + q_obj = self.find_instrument(qubit) + MW_lm_LRU = self.find_instrument(q_obj.instr_LutMan_LRU()) + if AWG_name == MW_lm_LRU.AWG(): + awg_chs = MW_lm_LRU.channel_I(), MW_lm_LRU.channel_Q() + log.debug("Setting `sigouts_{}_delay` to {:4g}" + " in {}".format(awg_chs[0], lat_fine, AWG.name)) + AWG.set("sigouts_{}_delay".format(awg_chs[0]-1), lat_fine) + AWG.set("sigouts_{}_delay".format(awg_chs[1]-1), lat_fine) + AWG.start() + # All channels are set globally from the device object. 
+ # for i in range(8): # assumes the AWG is an HDAWG + # log.debug( + # "Setting `sigouts_{}_delay` to {:4g}" + # " in {}".format(i, lat_fine, AWG.name) + # ) + # AWG.set("sigouts_{}_delay".format(i), lat_fine) + # ch_not_ready = 8 + # while ch_not_ready > 0: + # ch_not_ready = 0 + # for i in range(8): + # ch_not_ready += AWG.geti("sigouts/{}/busy".format(i)) + # check_keyboard_interrupt() + + def prepare_fluxing(self, qubits): + for qb_name in qubits: + qb = self.find_instrument(qb_name) + try: + fl_lutman = qb.instr_LutMan_Flux.get_instr() + fl_lutman.load_waveforms_onto_AWG_lookuptable() + except Exception as e: + warnings.warn("Could not load flux pulses for {}".format(qb)) + warnings.warn("Exception {}".format(e)) + + def prepare_readout(self, qubits, + reduced: bool = False, + qubit_int_weight_type_dict: dict=None): + """ + Configures readout for specified qubits. + + Args: + qubits (list of str): + list of qubit names that have to be prepared + qubit_int_weight_type_dict (dict of str): + dictionary specifying individual acquisition weight types + for qubits. example: + qubit_int_weight_type_dict={'X3':'optimal IQ', + 'D4':'optimal', + 'D5':'optimal IQ'} + Note: Make sure to use ro_acq_weight_type('custom') for + this to work! + Warning: Only allows for 'optimal' and 'optimal IQ'. + Also, order of dictionary should be same as 'qubits'. + Only works for the int_log_det and int_avg_det. 
+ """ + log.info('Configuring readout for {}'.format(qubits)) + + if not reduced: + self._prep_ro_sources(qubits=qubits) + + acq_ch_map = self._prep_ro_assign_weights(qubits=qubits, + qubit_int_weight_type_dict = qubit_int_weight_type_dict) + self._prep_ro_integration_weights(qubits=qubits, + qubit_int_weight_type_dict = qubit_int_weight_type_dict) + if not reduced: + self._prep_ro_pulses(qubits=qubits) + self._prep_ro_instantiate_detectors(qubits=qubits, + acq_ch_map=acq_ch_map) + + # TODO: + # - update global readout parameters (relating to mixer settings) + # the pulse mixer + # - ro_mixer_alpha, ro_mixer_phi + # - ro_mixer_offs_I, ro_mixer_offs_Q + # - ro_acq_delay + # the acquisition mixer + # commented out because it conflicts with setting in the qubit object + + # # These parameters affect all resonators. + # # Should not be part of individual qubits + # ro_lm.set('pulse_type', 'M_' + qb.ro_pulse_type()) + # ro_lm.set('mixer_alpha', + # qb.ro_pulse_mixer_alpha()) + # ro_lm.set('mixer_phi', + # qb.ro_pulse_mixer_phi()) + # ro_lm.set('mixer_offs_I', qb.ro_pulse_mixer_offs_I()) + # ro_lm.set('mixer_offs_Q', qb.ro_pulse_mixer_offs_Q()) + # ro_lm.acquisition_delay(qb.ro_acq_delay()) + + # ro_lm.set_mixer_offsets() + + def _prep_ro_sources(self, qubits): + """ + turn on and configure the RO LO's of all qubits to be measured and + update the modulation frequency of all qubits. + """ + # This device object works under the assumption that a single LO + # is used to drive all readout lines. 
+ LO = self.find_instrument(qubits[0]).instr_LO_ro.get_instr() + LO_lutman = self.find_instrument(qubits[0]).instr_LutMan_RO.get_instr() + LO.frequency.set(LO_lutman.LO_freq()) + LO.power(self.ro_pow_LO()) + LO.on() + + for qb_name in qubits: + qb = self.find_instrument(qb_name) + ro_lutman = qb.instr_LutMan_RO.get_instr() + # set RO modulation to use common LO frequency + mod_freq = qb.ro_freq() - ro_lutman.LO_freq() + log.info("Setting modulation freq of {} to {}".format(qb_name, mod_freq)) + qb.ro_freq_mod(mod_freq) + + LO_q = qb.instr_LO_ro.get_instr() + if LO_q is not LO: + LO_q.frequency.set(ro_lutman.LO_freq()) + #LO_q.power(self.ro_pow_LO()) + LO_q.on() + #raise ValueError("Expect a single LO to drive all feedlines") + + def _prep_ro_assign_weights(self, qubits, qubit_int_weight_type_dict=None): + """ + Assign acquisition weight channels to the different qubits. + + Args: + qubits (list of str): + list of qubit names that have to be prepared + + Returns + acq_ch_map (dict) + a mapping of acquisition instruments and channels used + for each qubit. + + The assignment is done based on the acq_instr used for each qubit + and the number of channels used per qubit. N.B. This method of mapping + has no implicit feedline or UHFQC contraint built in. + + The mapping of acq_channels to qubits is stored in self._acq_ch_map + for debugging purposes. 
+ """ + log.info('Setting up acquisition channels') + + if not qubit_int_weight_type_dict: + if self.ro_acq_weight_type() == 'optimal': + log.debug('ro_acq_weight_type = "optimal" using 1 ch per qubit') + nr_of_acq_ch_per_qubit = 1 + else: + log.debug('Using 2 ch per qubit') + nr_of_acq_ch_per_qubit = 2 + + acq_ch_map = {} + for qb_name in qubits: + qb = self.find_instrument(qb_name) + acq_instr = qb.instr_acquisition() + if not acq_instr in acq_ch_map.keys(): + acq_ch_map[acq_instr] = {} + + assigned_weight = len(acq_ch_map[acq_instr]) * nr_of_acq_ch_per_qubit + log.info( + "Assigning {} w{} to qubit {}".format( + acq_instr, assigned_weight, qb_name + ) + ) + acq_ch_map[acq_instr][qb_name] = assigned_weight + if assigned_weight > 9: + # There are only 10 acq_weight_channels per UHF. + # use optimal ro weights or read out less qubits. + raise ValueError("Trying to assign too many acquisition weights") + + qb.ro_acq_weight_chI(assigned_weight) + # even if the mode does not use Q weight, we still assign this + # this is for when switching back to the qubit itself + qb.ro_acq_weight_chQ(assigned_weight + 1) + + log.info("acq_channel_map: \n\t{}".format(acq_ch_map)) + + else: + log.info('Using custom acq_channel_map') + for q in qubits: + assert q in qubit_int_weight_type_dict.keys(), f"Qubit {q} not present in qubit_int_weight_type_dict" + acq_ch_map = {} + for qb_name, w_type in qubit_int_weight_type_dict.items(): + qb = self.find_instrument(qb_name) + acq_instr = qb.instr_acquisition() + if not acq_instr in acq_ch_map.keys(): + if w_type == 'optimal IQ': + acq_ch_map[acq_instr] = {f'{qb_name} I': 0, + f'{qb_name} Q': 1} + log.info("Assigning {} w0 and w1 to qubit {}".format( + acq_instr, qb_name)) + else: + acq_ch_map[acq_instr] = {f'{qb_name}': 0} + log.info("Assigning {} w0 to qubit {}".format( + acq_instr, qb_name)) + # even if the mode does not use Q weight, we still assign this + # this is for when switching back to the qubit itself + qb.ro_acq_weight_chI(0) + 
qb.ro_acq_weight_chQ(1) + + else: + _nr_channels_taken = len(acq_ch_map[acq_instr]) + if w_type == 'optimal IQ': + acq_ch_map[acq_instr][f'{qb_name} I'] = _nr_channels_taken + acq_ch_map[acq_instr][f'{qb_name} Q'] = _nr_channels_taken+1 + log.info("Assigning {} w{} and w{} to qubit {}".format( + acq_instr, _nr_channels_taken, _nr_channels_taken+1, qb_name)) + if _nr_channels_taken+1 > 10: + # There are only 10 acq_weight_channels per UHF. + # use optimal ro weights or read out less qubits. + raise ValueError("Trying to assign too many acquisition weights") + qb.ro_acq_weight_chI(_nr_channels_taken) + qb.ro_acq_weight_chQ(_nr_channels_taken+1) + else: + acq_ch_map[acq_instr][f'{qb_name}'] = _nr_channels_taken + log.info("Assigning {} w{} to qubit {}".format( + acq_instr, _nr_channels_taken, qb_name)) + if _nr_channels_taken > 10: + # There are only 10 acq_weight_channels per UHF. + # use optimal ro weights or read out less qubits. + raise ValueError("Trying to assign too many acquisition weights") + qb.ro_acq_weight_chI(_nr_channels_taken) + + log.info("Clearing UHF correlation settings") + for acq_instr_name in acq_ch_map.keys(): + self.find_instrument(acq_instr).reset_correlation_params() + self.find_instrument(acq_instr).reset_crosstalk_matrix() + + # Stored as a private attribute for debugging purposes. + self._acq_ch_map = acq_ch_map + + return acq_ch_map + + def _prep_ro_integration_weights(self, qubits, qubit_int_weight_type_dict=None): + """ + Set the acquisition integration weights on each channel. 
+ + Args: + qubits (list of str): + list of qubit names that have to be prepared + """ + log.info("Setting integration weights") + + if self.ro_acq_weight_type() == "SSB": + log.info("using SSB weights") + for qb_name in qubits: + qb = self.find_instrument(qb_name) + acq_instr = qb.instr_acquisition.get_instr() + + acq_instr.prepare_SSB_weight_and_rotation( + IF=qb.ro_freq_mod(), + weight_chI=qb.ro_acq_weight_chI(), + weight_chQ=qb.ro_acq_weight_chQ(), + ) + + elif 'optimal' in self.ro_acq_weight_type(): + log.info("using optimal weights") + for qb_name in qubits: + qb = self.find_instrument(qb_name) + acq_instr = qb.instr_acquisition.get_instr() + opt_WI = qb.ro_acq_weight_func_I() + opt_WQ = qb.ro_acq_weight_func_Q() + # N.B. no support for "delay samples" relating to #63 + if opt_WI is None or opt_WQ is None: + # do not raise an exception as it should be possible to + # run input avg experiments to calibrate the optimal weights. + log.warning("No optimal weights defined for" + " {}, not updating weights".format(qb_name)) + else: + acq_instr.set("qas_0_integration_weights_{}_real".format( + qb.ro_acq_weight_chI()), opt_WI,) + acq_instr.set("qas_0_integration_weights_{}_imag".format( + qb.ro_acq_weight_chI()), opt_WQ,) + acq_instr.set("qas_0_rotations_{}".format( + qb.ro_acq_weight_chI()), 1.0 - 1.0j) + if self.ro_acq_weight_type() == 'optimal IQ': + print('setting the optimal Q') + acq_instr.set('qas_0_integration_weights_{}_real'.format( + qb.ro_acq_weight_chQ()), opt_WQ) + acq_instr.set('qas_0_integration_weights_{}_imag'.format( + qb.ro_acq_weight_chQ()), opt_WI) + acq_instr.set('qas_0_rotations_{}'.format( + qb.ro_acq_weight_chQ()), 1.0 + 1.0j) + + if self.ro_acq_digitized(): + # Update the RO theshold + if (qb.ro_acq_rotated_SSB_when_optimal() and + abs(qb.ro_acq_threshold()) > 32): + threshold = 32 + log.warning( + "Clipping ro_acq threshold of {} to 32".format(qb.name)) + # working around the limitation of threshold in UHFQC + # which cannot be >abs(32). 
+ # See also self._prep_ro_integration_weights scaling the weights + else: + threshold = qb.ro_acq_threshold() + + qb.instr_acquisition.get_instr().set( + "qas_0_thresholds_{}_level".format(qb.ro_acq_weight_chI()), + threshold, + ) + log.info("Setting threshold of {} to {}".format(qb.name, threshold)) + # Note, no support for optimal IQ in mux RO + # Note, no support for ro_cq_rotated_SSB_when_optimal + + elif 'custom' in self.ro_acq_weight_type(): + assert qubit_int_weight_type_dict != None + for q in qubits: + assert q in qubit_int_weight_type_dict.keys(), f"Qubit {q} not present in qubit_int_weight_type_dict" + log.info("using optimal custom mapping") + for qb_name in qubits: + qb = self.find_instrument(qb_name) + acq_instr = qb.instr_acquisition.get_instr() + opt_WI = qb.ro_acq_weight_func_I() + opt_WQ = qb.ro_acq_weight_func_Q() + # N.B. no support for "delay samples" relating to #63 + if opt_WI is None or opt_WQ is None: + # do not raise an exception as it should be possible to + # run input avg experiments to calibrate the optimal weights. + log.warning("No optimal weights defined for" + " {}, not updating weights".format(qb_name)) + else: + acq_instr.set("qas_0_integration_weights_{}_real".format( + qb.ro_acq_weight_chI()), opt_WI,) + acq_instr.set("qas_0_integration_weights_{}_imag".format( + qb.ro_acq_weight_chI()), opt_WQ,) + acq_instr.set("qas_0_rotations_{}".format( + qb.ro_acq_weight_chI()), 1.0 - 1.0j) + if qubit_int_weight_type_dict[qb_name] == 'optimal IQ': + print('setting the optimal Q') + acq_instr.set('qas_0_integration_weights_{}_real'.format( + qb.ro_acq_weight_chQ()), opt_WQ) + acq_instr.set('qas_0_integration_weights_{}_imag'.format( + qb.ro_acq_weight_chQ()), opt_WI) + acq_instr.set('qas_0_rotations_{}'.format( + qb.ro_acq_weight_chQ()), 1.0 + 1.0j) + + else: + raise NotImplementedError('ro_acq_weight_type "{}" not supported'.format( + self.ro_acq_weight_type())) + + def _prep_ro_pulses(self, qubits): + """ + Configure the ro lutmans. 
+ + The configuration includes + - setting the right parameters for all readout pulses + - uploading the waveforms to the UHFQC + - setting the "resonator_combinations" that determine allowed pulses + N.B. by convention we support all individual readouts and + the readout all qubits instruction. + """ + + ro_lms = [] + + resonators_in_lm = {} + + for qb_name in qubits: + qb = self.find_instrument(qb_name) + # qubit and resonator number are identical + res_nr = qb.cfg_qubit_nr() + ro_lm = qb.instr_LutMan_RO.get_instr() + + # Add resonator to list of resonators in lm + if ro_lm not in ro_lms: + ro_lms.append(ro_lm) + resonators_in_lm[ro_lm.name] = [] + resonators_in_lm[ro_lm.name].append(res_nr) + + # update parameters of RO pulse in ro lutman + + # ro_freq_mod was updated in self._prep_ro_sources + ro_lm.set("M_modulation_R{}".format(res_nr), qb.ro_freq_mod()) + + ro_lm.set("M_length_R{}".format(res_nr), qb.ro_pulse_length()) + ro_lm.set("M_amp_R{}".format(res_nr), qb.ro_pulse_amp()) + ro_lm.set("M_delay_R{}".format(res_nr), qb.ro_pulse_delay()) + ro_lm.set("M_phi_R{}".format(res_nr), qb.ro_pulse_phi()) + ro_lm.set("M_down_length0_R{}".format(res_nr), qb.ro_pulse_down_length0()) + ro_lm.set("M_down_amp0_R{}".format(res_nr), qb.ro_pulse_down_amp0()) + ro_lm.set("M_down_phi0_R{}".format(res_nr), qb.ro_pulse_down_phi0()) + ro_lm.set("M_down_length1_R{}".format(res_nr), qb.ro_pulse_down_length1()) + ro_lm.set("M_down_amp1_R{}".format(res_nr), qb.ro_pulse_down_amp1()) + ro_lm.set("M_down_phi1_R{}".format(res_nr), qb.ro_pulse_down_phi1()) + + for ro_lm in ro_lms: + # list comprehension should result in a list with each + # individual resonator + the combination of all simultaneously + # resonator_combs = [[r] for r in resonators_in_lm[ro_lm.name]] + \ + # [resonators_in_lm[ro_lm.name]] + resonator_combs = [resonators_in_lm[ro_lm.name]] + log.info('Setting resonator combinations for {} to {}'.format( + ro_lm.name, resonator_combs)) + 
ro_lm.acquisition_delay(self.ro_acq_delay()) + ro_lm.resonator_combinations(resonator_combs) + ro_lm.load_DIO_triggered_sequence_onto_UHFQC() + + def get_correlation_detector(self, qubits: list, + single_int_avg: bool = False, + seg_per_point: int = 1, + always_prepare: bool = False): + if self.ro_acq_digitized(): + log.warning('Digitized mode gives bad results') + if len(qubits) != 2: + raise ValueError("Not possible to define correlation " + "detector for more than two qubits") + if self.ro_acq_weight_type() != 'optimal': + raise ValueError('Correlation detector only works ' + 'with optimal weights') + q0 = self.find_instrument(qubits[0]) + q1 = self.find_instrument(qubits[1]) + + w0 = q0.ro_acq_weight_chI() + w1 = q1.ro_acq_weight_chI() + + if q0.instr_acquisition.get_instr() == q1.instr_acquisition.get_instr(): + d = det.UHFQC_correlation_detector( + UHFQC=q0.instr_acquisition.get_instr(), # <- hack line + thresholding=self.ro_acq_digitized(), + AWG=self.instr_CC.get_instr(), + channels=[w0, w1], correlations=[(w0, w1)], + nr_averages=self.ro_acq_averages(), + integration_length=q0.ro_acq_integration_length(), + single_int_avg=single_int_avg, + seg_per_point=seg_per_point, + always_prepare=always_prepare) + d.value_names = ['{} w{}'.format(qubits[0], w0), + '{} w{}'.format(qubits[1], w1), + 'Corr ({}, {})'.format(qubits[0], qubits[1])] + else: + # This should raise a ValueError but exists for legacy reasons. + # WARNING DEBUG HACK + d = self.get_int_avg_det(qubits=qubits, + single_int_avg=single_int_avg, + seg_per_point=seg_per_point, + always_prepare=always_prepare) + + return d + + def get_int_logging_detector(self, qubits=None, result_logging_mode='raw'): + + # qubits passed to but not used in function? 
+ + if self.ro_acq_weight_type() == 'SSB': + result_logging_mode = 'raw' + elif 'optimal' in self.ro_acq_weight_type(): + # lin_trans includes + result_logging_mode = 'lin_trans' + if self.ro_acq_digitized(): + result_logging_mode = 'digitized' + elif 'custom' in self.ro_acq_weight_type(): + # lin_trans includes + result_logging_mode = "lin_trans" + + log.info("Setting result logging mode to {}".format(result_logging_mode)) + + if (self.ro_acq_weight_type() != "optimal") and\ + (self.ro_acq_weight_type() != "custom"): + acq_ch_map = _acq_ch_map_to_IQ_ch_map(self._acq_ch_map) + else: + acq_ch_map = self._acq_ch_map + + int_log_dets = [] + for i, acq_instr_name in enumerate(self._acq_ch_map.keys()): + if i == 0: + CC = self.instr_CC.get_instr() + else: + CC = None + + UHFQC = self.find_instrument(acq_instr_name) + int_log_dets.append( + det.UHFQC_integration_logging_det( + channels=list(acq_ch_map[acq_instr_name].values()), + value_names=list(acq_ch_map[acq_instr_name].keys()), + UHFQC=UHFQC, AWG=CC, + result_logging_mode=result_logging_mode, + integration_length=self.ro_acq_integration_length(), + ) + ) + + int_log_det = det.Multi_Detector_UHF( + detectors=int_log_dets, detector_labels=list(self._acq_ch_map.keys()) + ) + + return int_log_det + + def _prep_ro_instantiate_detectors(self, qubits, acq_ch_map): + """ + Instantiate acquisition detectors. 
+ + Args: + qubits (list of str): + list of qubit names that have to be prepared + acq_ch_map (dict) + dict specifying the mapping + """ + log.info("Instantiating readout detectors") + self.input_average_detector = self.get_input_avg_det() + self.int_avg_det = self.get_int_avg_det() + self.int_avg_det_single = self.get_int_avg_det(single_int_avg=True) + self.int_log_det = self.get_int_logging_detector() + + if len(qubits) == 2 and self.ro_acq_weight_type() == 'optimal': + self.corr_det = self.get_correlation_detector(qubits=qubits) + else: + self.corr_det = None + + def get_input_avg_det(self, **kw): + """ + Create an input average multi detector based. + + The input average multi detector is based on the self._acq_ch_map + that gets set when calling self.prepare_readout(qubits). + """ + input_average_detectors = [] + + for i, acq_instr_name in enumerate(self._acq_ch_map.keys()): + if i == 0: + CC = self.instr_CC.get_instr() + else: + CC = None + UHFQC = self.find_instrument(acq_instr_name) + + input_average_detectors.append( + det.UHFQC_input_average_detector( + UHFQC=UHFQC, + AWG=CC, + nr_averages=self.ro_acq_averages(), + nr_samples=int(self.ro_acq_integration_length() * 1.8e9), + ), + **kw + ) + + input_average_detector = det.Multi_Detector_UHF( + detectors=input_average_detectors, + detector_labels=list(self._acq_ch_map.keys()), + ) + + return input_average_detector + + def get_int_avg_det(self, qubits=None, **kw): + """ + """ + if qubits is not None: + log.warning("qubits is deprecated") + + if self.ro_acq_weight_type() == "SSB": + result_logging_mode = "raw" + elif 'optimal' in self.ro_acq_weight_type(): + # lin_trans includes + result_logging_mode = "lin_trans" + if self.ro_acq_digitized(): + result_logging_mode = "digitized" + elif 'custom' in self.ro_acq_weight_type(): + # lin_trans includes + result_logging_mode = "lin_trans" + + log.info("Setting result logging mode to {}".format(result_logging_mode)) + + if (self.ro_acq_weight_type() != "optimal") and\ 
+ (self.ro_acq_weight_type() != "custom"): + acq_ch_map = _acq_ch_map_to_IQ_ch_map(self._acq_ch_map) + else: + acq_ch_map = self._acq_ch_map + + int_avg_dets = [] + for i, acq_instr_name in enumerate(acq_ch_map.keys()): + # The master detector is the one that holds the CC object + if i == 0: + CC = self.instr_CC.get_instr() + else: + CC = None + int_avg_dets.append( + det.UHFQC_integrated_average_detector( + channels=list(acq_ch_map[acq_instr_name].values()), + value_names=list(acq_ch_map[acq_instr_name].keys()), + UHFQC=self.find_instrument(acq_instr_name), + AWG=CC, + result_logging_mode=result_logging_mode, + nr_averages=self.ro_acq_averages(), + integration_length=self.ro_acq_integration_length(), **kw + ) + ) + + int_average_detector = det.Multi_Detector_UHF( + detectors=int_avg_dets, detector_labels=list(self._acq_ch_map.keys()) + ) + return int_average_detector + + def _prep_td_configure_VSM(self): + """ + turn off all VSM channels and then use qubit settings to + turn on the required channels again. + """ + + # turn all channels on all VSMs off + for qb_name in self.qubits(): + qb = self.find_instrument(qb_name) + VSM = qb.instr_VSM.get_instr() + # VSM.set_all_switches_to('OFF') # FIXME: commented out + + # turn the desired channels on + for qb_name in self.qubits(): + qb = self.find_instrument(qb_name) + log + + # Configure VSM + # N.B. 
This configure VSM block is geared specifically to the + # Duplexer/BlueBox VSM + # FIXME: code below commented out + # VSM = qb.instr_VSM.get_instr() + # Gin = qb.mw_vsm_ch_in() + # Din = qb.mw_vsm_ch_in() + # out = qb.mw_vsm_mod_out() + + # VSM.set('in{}_out{}_switch'.format(Gin, out), qb.mw_vsm_switch()) + # VSM.set('in{}_out{}_switch'.format(Din, out), qb.mw_vsm_switch()) + + # VSM.set('in{}_out{}_att'.format(Gin, out), qb.mw_vsm_G_att()) + # VSM.set('in{}_out{}_att'.format(Din, out), qb.mw_vsm_D_att()) + # VSM.set('in{}_out{}_phase'.format(Gin, out), qb.mw_vsm_G_phase()) + # VSM.set('in{}_out{}_phase'.format(Din, out), qb.mw_vsm_D_phase()) + + # self.instr_CC.get_instr().set( + # 'vsm_channel_delay{}'.format(qb.cfg_qubit_nr()), + # qb.mw_vsm_delay()) + + def prepare_for_timedomain(self, qubits: list, reduced: bool = False, + bypass_flux: bool = False, + prepare_for_readout: bool = True): + """ + Prepare setup for a timedomain experiment: + + Args: + qubits (list of str): + list of qubit names that have to be prepared + """ + if prepare_for_readout: + self.prepare_readout(qubits=qubits, reduced=reduced) + if reduced: + return + if bypass_flux is False: + self.prepare_fluxing(qubits=qubits) + self.prepare_timing() + + for qb_name in qubits: + qb = self.find_instrument(qb_name) + qb._prep_td_sources() + qb._prep_mw_pulses() + # qb._set_mw_fine_delay(qb.mw_fine_delay()) + + # self._prep_td_configure_VSM() + + ######################################################## + # Measurement methods + ######################################################## + def measure_conditional_oscillation( + self, + q0: str, + q1: str, + q2: str = None, + q3: str = None, + flux_codeword="cz", + flux_codeword_park=None, + parked_qubit_seq=None, + downsample_swp_points=1, # x2 and x3 available + prepare_for_timedomain=True, + MC=None, + disable_cz: bool = False, + disabled_cz_duration_ns: int = 60, + cz_repetitions: int = 1, + wait_time_before_flux_ns: int = 0, + wait_time_after_flux_ns: 
int = 0, + disable_parallel_single_q_gates: bool = False, + label="", + verbose=True, + disable_metadata=False, + extract_only=False, + ): + """ + Measures the "conventional cost function" for the CZ gate that + is a conditional oscillation. In this experiment the conditional phase + in the two-qubit Cphase gate is measured using Ramsey-lie sequence. + Specifically qubit q0 is prepared in the superposition, while q1 is in 0 or 1 state. + Next the flux pulse is applied. Finally pi/2 afterrotation around various axes + is applied to q0, and q1 is flipped back (if neccessary) to 0 state. + Plotting the probabilities of the zero state for each qubit as a function of + the afterrotation axis angle, and comparing case of q1 in 0 or 1 state, enables to + measure the conditional phase and estimale the leakage of the Cphase gate. + + Refs: + Rol arXiv:1903.02492, Suppl. Sec. D + + Args: + q0 (str): + target qubit name (i.e. the qubit in the superposition state) + + q1 (str): + control qubit name (i.e. the qubit remaining in 0 or 1 state) + q2, q3 (str): + names of optional extra qubit to either park or apply a CZ to. + flux_codeword (str): + the gate to be applied to the qubit pair q0, q1 + flux_codeword_park (str): + optionally park qubits q2 (and q3) with either a 'park' pulse + (single qubit operation on q2) or a 'cz' pulse on q2-q3. 
+ NB: depending on the CC configurations the parking can be + implicit in the main `cz` + prepare_for_timedomain (bool): + should the insruments be reconfigured for time domain measurement + disable_cz (bool): + execute the experiment with no flux pulse applied + disabled_cz_duration_ns (int): + waiting time to emulate the flux pulse + wait_time_after_flux_ns (int): + additional waiting time (in ns) after the flux pulse, before + the final afterrotations + + """ + if MC is None: + MC = self.instr_MC.get_instr() + assert q0 in self.qubits() + assert q1 in self.qubits() + q0idx = self.find_instrument(q0).cfg_qubit_nr() + q1idx = self.find_instrument(q1).cfg_qubit_nr() + list_qubits_used = [q0, q1] + if q2 is None: + q2idx = None + else: + q2idx = self.find_instrument(q2).cfg_qubit_nr() + list_qubits_used.append(q2) + if q3 is None: + q3idx = None + else: + q3idx = self.find_instrument(q3).cfg_qubit_nr() + list_qubits_used.append(q3) + + if prepare_for_timedomain: + self.prepare_readout(qubits=list_qubits_used) + self.prepare_fluxing(qubits=list_qubits_used) + for q in list_qubits_used: #only on the CZ qubits we add the ef pulses + self.find_instrument(q)._prep_td_sources() + mw_lutman = self.find_instrument(q).instr_LutMan_MW.get_instr() + lm = mw_lutman.LutMap() + # we hardcode the X on the ef transition to CW 31 here. 
+ lm[31] = {"name": "rX12", "theta": 180, "phi": 0, "type": "ef"} + # load_phase_pulses will also upload other waveforms + mw_lutman.load_phase_pulses_to_AWG_lookuptable() + mw_lutman.load_waveforms_onto_AWG_lookuptable( + regenerate_waveforms=True) + + # These are hardcoded angles in the mw_lutman for the AWG8 + # only x2 and x3 downsample_swp_points available + angles = np.arange(0, 341, 20 * downsample_swp_points) + + if parked_qubit_seq is None: + parked_qubit_seq = "ramsey" if q2 is not None else "ground" + + p = mqo.conditional_oscillation_seq( + q0idx, + q1idx, + q2idx, + q3idx, + platf_cfg=self.cfg_openql_platform_fn(), + disable_cz=disable_cz, + disabled_cz_duration=disabled_cz_duration_ns, + angles=angles, + wait_time_before_flux=wait_time_before_flux_ns, + wait_time_after_flux=wait_time_after_flux_ns, + flux_codeword=flux_codeword, + flux_codeword_park=flux_codeword_park, + cz_repetitions=cz_repetitions, + parked_qubit_seq=parked_qubit_seq, + disable_parallel_single_q_gates=disable_parallel_single_q_gates + ) + + s = swf.OpenQL_Sweep( + openql_program=p, + CCL=self.instr_CC.get_instr(), + parameter_name="Phase", + unit="deg", + ) + MC.set_sweep_function(s) + MC.set_sweep_points(p.sweep_points) + + measured_qubits = [q0,q1] + if q2 is not None: + measured_qubits.append(q2) + if q3 is not None: + measured_qubits.append(q3) + + MC.set_detector_function(self.get_int_avg_det(qubits=measured_qubits)) + + MC.run( + "conditional_oscillation_{}_{}_&_{}_{}_x{}_wb{}_wa{}{}{}{}".format( + q0, q1, q2, q3, cz_repetitions, + wait_time_before_flux_ns, wait_time_after_flux_ns,parked_qubit_seq, + self.msmt_suffix, label, + ), + disable_snapshot_metadata=disable_metadata, + ) + + # [2020-06-24] parallel cz not supported (yet) + # should be implemented by just running the analysis twice with + # corresponding channels + + options_dict = { + 'ch_idx_osc': 0, + 'ch_idx_spec': 1 + } + + if q2 is not None: + options_dict['ch_idx_park'] = 2 + + a = 
ma2.Conditional_Oscillation_Analysis( + options_dict=options_dict, + extract_only=extract_only) + + return a + + def measure_conditional_oscillation_multi( + self, + pairs: list, + parked_qbs: list, + flux_codeword="cz", + phase_offsets:list = None, + parked_qubit_seq=None, + downsample_swp_points=1, # x2 and x3 available + prepare_for_timedomain=True, + MC=None, + disable_cz: bool = False, + disabled_cz_duration_ns: int = 60, + cz_repetitions: int = 1, + wait_time_before_flux_ns: int = 0, + wait_time_after_flux_ns: int = 0, + disable_parallel_single_q_gates: bool = False, + label="", + verbose=True, + disable_metadata=False, + extract_only=False, + ): + """ + Measures the "conventional cost function" for the CZ gate that + is a conditional oscillation. In this experiment the conditional phase + in the two-qubit Cphase gate is measured using Ramsey-like sequence. + Specifically qubit q0 of each pair is prepared in the superposition, while q1 is in 0 or 1 state. + Next the flux pulse is applied. Finally pi/2 afterrotation around various axes + is applied to q0, and q1 is flipped back (if neccessary) to 0 state. + Plotting the probabilities of the zero state for each qubit as a function of + the afterrotation axis angle, and comparing case of q1 in 0 or 1 state, enables to + measure the conditional phase and estimale the leakage of the Cphase gate. + + Refs: + Rol arXiv:1903.02492, Suppl. Sec. D + IARPA M6 for the flux-dance, not publicly available + + Args: + pairs (lst(lst)): + Contains all pairs with the order (q0,q1) where q0 in 'str' is the target and q1 in + 'str' is the control. This is based on qubits that are parked in the flux-dance. + + parked_qbs(lst): + Contains a list of all qubits that are required to be parked. + This is based on qubits that are parked in the flux-dance. + + flux_codeword (str): + the gate to be applied to the qubit pair [q0, q1] + + flux_codeword_park (str): + optionally park qubits. This is designed according to the flux-dance. 
if + one has to measure a single pair, has to provide more qubits for parking. + Problem here is parked qubits are hardcoded in cc config, thus one has to include the extra + parked qubits in this file. + (single qubit operation on q2) or a 'cz' pulse on q2-q3. + NB: depending on the CC configurations the parking can be + implicit in the main `cz` + + prepare_for_timedomain (bool): + should the insruments be reconfigured for time domain measurement + + disable_cz (bool): + execute the experiment with no flux pulse applied + + disabled_cz_duration_ns (int): + waiting time to emulate the flux pulse + + wait_time_before_flux_ns (int): + additional waiting time (in ns) before the flux pulse. + + wait_time_after_flux_ns (int): + additional waiting time (in ns) after the flux pulse, before + the final afterrotations + + """ + + if self.ro_acq_weight_type() != 'optimal': + # this occurs because the detector groups qubits per feedline. + # If you do not pay attention, this will mess up the analysis of + # this experiment. + raise ValueError('Current conditional analysis is not working with {}'.format(self.ro_acq_weight_type())) + + if MC is None: + MC = self.instr_MC.get_instr() + + Q_idxs_target = [] + Q_idxs_control = [] + Q_idxs_parked = [] + list_qubits_used = [] + ramsey_qubits = [] + + for i,pair in enumerate(pairs): + # print ( 'Pair (target,control) {} : ({},{})'. 
format(i+1,pair[0],pair[1])) + assert pair[0] in self.qubits() + assert pair[1] in self.qubits() + Q_idxs_target += [self.find_instrument(pair[0]).cfg_qubit_nr()] + Q_idxs_control += [self.find_instrument(pair[1]).cfg_qubit_nr()] + list_qubits_used += [pair[0], pair[1]] + ramsey_qubits += [pair[0]] + + # print('Q_idxs_target : {}'.format(Q_idxs_target)) + # print('Q_idxs_control : {}'.format(Q_idxs_control)) + # print('list_qubits_used : {}'.format(list_qubits_used)) + + if parked_qbs is not None: + Q_idxs_parked = [self.find_instrument(Q).cfg_qubit_nr() for Q in parked_qbs] + + if prepare_for_timedomain: + for i, q in enumerate(list_qubits_used): + mw_lutman = self.find_instrument(q).instr_LutMan_MW.get_instr() + mw_lutman.set_default_lutmap() + lm = mw_lutman.LutMap() + lm[27] = {'name': 'rXm180', 'phi': 0, 'theta': -180, 'type': 'ge'} + # This is awkward because cw9 should have rx12 by default + lm[31] = {"name": "rX12", "theta": 180, "phi": 0, "type": "ef"} + mw_lutman.LutMap(lm) + + self.prepare_for_timedomain(qubits=list_qubits_used) + + for i, q in enumerate(np.concatenate([ramsey_qubits])): + mw_lutman = self.find_instrument(q).instr_LutMan_MW.get_instr() + # load_phase_pulses will also upload other waveforms + if phase_offsets == None: + mw_lutman.load_phase_pulses_to_AWG_lookuptable() + else: + mw_lutman.load_phase_pulses_to_AWG_lookuptable( + phases=np.arange(0,360,20)+phase_offsets[i]) + mw_lutman.load_waveforms_onto_AWG_lookuptable( + regenerate_waveforms=True) + # prepare_parked qubits + for q in parked_qbs: + fl_lm = self.find_instrument(q).instr_LutMan_Flux.get_instr() + fl_lm.load_waveform_onto_AWG_lookuptable( + wave_id='park', regenerate_waveforms=True) + + # These are hardcoded angles in the mw_lutman for the AWG8 + # only x2 and x3 downsample_swp_points available + angles = np.arange(0, 341, 20 * downsample_swp_points) + + p = mqo.conditional_oscillation_seq_multi( + Q_idxs_target, + Q_idxs_control, + Q_idxs_parked, + 
platf_cfg=self.cfg_openql_platform_fn(), + disable_cz=disable_cz, + disabled_cz_duration=disabled_cz_duration_ns, + angles=angles, + wait_time_before_flux=wait_time_before_flux_ns, + wait_time_after_flux=wait_time_after_flux_ns, + flux_codeword=flux_codeword, + cz_repetitions=cz_repetitions, + parked_qubit_seq=parked_qubit_seq, + disable_parallel_single_q_gates=disable_parallel_single_q_gates + ) + + s = swf.OpenQL_Sweep( + openql_program=p, + CCL=self.instr_CC.get_instr(), + parameter_name="Phase", + unit="deg", + ) + + MC.set_sweep_function(s) + MC.set_sweep_points(p.sweep_points) + d = self.get_int_avg_det(qubits=list_qubits_used) + MC.set_detector_function(d) + + MC.run( + "conditional_oscillation_{}_x{}_{}{}".format( + list_qubits_used, cz_repetitions, + self.msmt_suffix, label, + ), + disable_snapshot_metadata=disable_metadata, + ) + + if len(pairs) > 1: + # qb_ro_order = np.sum([ list(self._acq_ch_map[key].keys()) for key in self._acq_ch_map.keys()]) + # qubits_by_feedline = [['D1','X1'], + # ['D2','Z1','D3','D4','D5','D7','X2','X3','Z3'], + # ['D6','D8','D9','X4','Z2','Z4']] + # qb_ro_order = sorted(np.array(pairs).flatten().tolist(), + # key=lambda x: [i for i,qubits in enumerate(qubits_by_feedline) if x in qubits]) + qb_ro_order = [qb for qb_dict in self._acq_ch_map.values() for qb in qb_dict.keys()] + else: + # qb_ro_order = [ list(self._acq_ch_map[key].keys()) for key in self._acq_ch_map.keys()][0] + qb_ro_order = [pairs[0][0], pairs[0][1]] + + result_dict = {} + for i, pair in enumerate(pairs): + ch_osc = qb_ro_order.index(pair[0]) + ch_spec= qb_ro_order.index(pair[1]) + + options_dict = { + 'ch_idx_osc': ch_osc, + 'ch_idx_spec': ch_spec + } + a = ma2.Conditional_Oscillation_Analysis( + options_dict=options_dict, + extract_only=extract_only) + + result_dict['pair_{}_delta_phi_a'.format(i+1)] = \ + a.proc_data_dict['quantities_of_interest']['phi_cond'].n % 360 + + result_dict['pair_{}_missing_frac_a'.format(i+1)] = \ + 
a.proc_data_dict['quantities_of_interest']['missing_fraction'].n + + result_dict['pair_{}_offset_difference_a'.format(i+1)] = \ + a.proc_data_dict['quantities_of_interest']['offs_diff'].n + + result_dict['pair_{}_phi_0_a'.format(i+1)] = \ + (a.proc_data_dict['quantities_of_interest']['phi_0'].n+180) % 360 - 180 + + result_dict['pair_{}_phi_1_a'.format(i+1)] = \ + (a.proc_data_dict['quantities_of_interest']['phi_1'].n+180) % 360 - 180 + + return result_dict + + def measure_flux_arc_dc_conditional_oscillation( + self, + qubit_high: str, + qubit_low: str, + flux_array: Optional[np.ndarray] = None, + flux_sample_points: int = 21, + disable_metadata: bool = False, + prepare_for_timedomain: bool = True, + analyze: bool = True, + ): + assert self.ro_acq_weight_type() == 'optimal', "Expects device acquisition weight type to be 'optimal'" + + # Get instruments + nested_MC = self.instr_nested_MC.get_instr() + qubit_high_instrument = self.find_instrument(qubit_high) + _flux_instrument = qubit_high_instrument.instr_FluxCtrl.get_instr() + _flux_parameter = _flux_instrument[f'FBL_{qubit_high}'] + qubits_awaiting_prepare = [qubit_high, qubit_low] + self.prepare_readout(qubits_awaiting_prepare) + parked_qubits = get_parking_qubits(qubit_high, qubit_low) + + original_current: float = qubit_high_instrument.fl_dc_I0() + if flux_array is None: + flux_array = np.linspace(-30e-6, 30e-6, flux_sample_points) + original_current + + local_prepare = ManualParameter('local_prepare', initial_value=prepare_for_timedomain) + local_metadata = ManualParameter('local_metadata', initial_value=disable_metadata) + def wrapper(): + a = self.measure_conditional_oscillation_multi( + pairs=[[qubit_high, qubit_low]], + parked_qbs=parked_qubits, + disable_metadata=local_metadata(), + prepare_for_timedomain=local_prepare(), + extract_only=True, + ) + # Turn off prepare and metadata for followup measurements + local_prepare(False) + local_metadata(False) + return { + 'pair_1_delta_phi_a': 
a['pair_1_delta_phi_a'], + 'pair_1_missing_frac_a': a['pair_1_missing_frac_a'], + 'pair_1_offset_difference_a': a['pair_1_offset_difference_a'], + 'pair_1_phi_0_a': a['pair_1_phi_0_a'], + 'pair_1_phi_1_a': a['pair_1_phi_1_a'], + } + + d = det.Function_Detector( + wrapper, + result_keys=['pair_1_missing_frac_a', 'pair_1_delta_phi_a', 'pair_1_phi_0_a'], + value_names=['missing_fraction', 'phi_cond', 'phi_0'], + value_units=['a.u.', 'degree', 'degree'], + ) + + nested_MC.set_detector_function(d) + nested_MC.set_sweep_function(_flux_parameter) + nested_MC.set_sweep_points(np.atleast_1d(flux_array)) + + response = None + label = f'conditional_oscillation_dc_flux_arc_{qubit_high}' + try: + response = nested_MC.run(label, disable_snapshot_metadata=disable_metadata) + except Exception as e: + log.warn(e) + finally: + _flux_parameter(original_current) + if analyze: + a = ma2.FineBiasAnalysis( + initial_bias=original_current, + label=label, + ) + a.run_analysis() + return a + return response + + + def measure_residual_ZZ_coupling( + self, + q0: str, + q_spectators: list, + spectator_state="0", + times=np.linspace(0, 10e-6, 26), + analyze: bool = True, + close_fig: bool = True, + prepare_for_timedomain: bool = True, + MC=None, + ): + + assert q0 in self.qubits() + for q_s in q_spectators: + assert q_s in self.qubits() + + all_qubits = [q0] + q_spectators + if prepare_for_timedomain: + self.prepare_for_timedomain(qubits=all_qubits) + if MC is None: + MC = self.instr_MC.get_instr() + + q0idx = self.find_instrument(q0).cfg_qubit_nr() + q_spec_idx_list = [ + self.find_instrument(q_s).cfg_qubit_nr() for q_s in q_spectators + ] + + p = mqo.residual_coupling_sequence( + times, + q0idx, + q_spec_idx_list, + spectator_state, + self.cfg_openql_platform_fn(), + ) + s = swf.OpenQL_Sweep(openql_program=p, CCL=self.instr_CC.get_instr()) + d = self.get_int_avg_det(qubits=all_qubits) + MC.set_sweep_function(s) + MC.set_sweep_points(times) + MC.set_detector_function(d) + 
MC.run('Residual_ZZ_{}_{}_{}{}'.format(q0, q_spectators, spectator_state, self.msmt_suffix), + exp_metadata={'target_qubit': q0, + 'spectator_qubits': str(q_spectators), + 'spectator_state': spectator_state}) + if analyze: + a = ma.MeasurementAnalysis(close_main_fig=close_fig) + return a + + def measure_ssro_multi_qubit( + self, + qubits: list, + f_state: bool = False, + nr_shots_per_case: int = 2**13, # 8192 + prepare_for_timedomain: bool = True, + result_logging_mode='raw', + initialize: bool = False, + analyze=True, + label='Mux_SSRO', + MC=None): + """ + Perform a simultaneous ssro experiment on multiple qubits. + Args: + qubits (list of str) + list of qubit names + nr_shots_per_case (int): + total number of measurements for each case under consideration + e.g., n*|00> , n*|01>, n*|10> , n*|11> for two qubits + + + """ + log.info("{}.measure_ssro_multi_qubit for qubits{}".format(self.name, qubits)) + + # # off and on, not including post selection init measurements yet + # nr_cases = 2**len(qubits) # e.g., 00, 01 ,10 and 11 in the case of 2q + # nr_shots = nr_shots_per_case*nr_cases + + # off and on, not including post selection init measurements yet + if f_state: + nr_cases = 3 ** len(qubits) + else: + nr_cases = 2 ** len(qubits) + + if initialize: + nr_shots = 2 * nr_shots_per_case * nr_cases + else: + nr_shots = nr_shots_per_case * nr_cases + + self.ro_acq_digitized(False) + + if prepare_for_timedomain: + self.prepare_for_timedomain(qubits, bypass_flux=True) + if MC is None: + MC = self.instr_MC.get_instr() + + d = self.get_int_logging_detector( + qubits, result_logging_mode=result_logging_mode + ) + # Check detector order + det_qubits, _idxs = np.unique([ name.split(' ')[2] + for name in d.value_names], return_index=True) + det_qubits = det_qubits[_idxs] + if not all(qubits != det_qubits): + # this occurs because the detector groups qubits per feedline. + # If you do not pay attention, this will mess up the analysis of + # this experiment. 
+ print('Detector qubits do not match order specified.{} vs {}'.format(qubits, det_qubits)) + qubits = det_qubits + + qubit_idxs = [self.find_instrument(qn).cfg_qubit_nr() for qn in qubits] + p = mqo.multi_qubit_off_on( + qubit_idxs, + initialize=initialize, + second_excited_state=f_state, + platf_cfg=self.cfg_openql_platform_fn(), + ) + s = swf.OpenQL_Sweep(openql_program=p, CCL=self.instr_CC.get_instr()) + + + shots_per_meas = int( + np.floor(np.min([2**20, nr_shots]) / nr_cases) * nr_cases + ) + + d.set_child_attr("nr_shots", shots_per_meas) + + old_soft_avg = MC.soft_avg() + old_live_plot_enabled = MC.live_plot_enabled() + MC.soft_avg(1) + MC.live_plot_enabled(False) + MC.set_sweep_function(s) + MC.set_sweep_points(np.arange(nr_shots)) + MC.set_detector_function(d) + MC.run("{}_{}_{}".format(label, qubits, self.msmt_suffix)) + MC.soft_avg(old_soft_avg) + MC.live_plot_enabled(old_live_plot_enabled) + + if analyze: + if initialize: + thresholds = [ + self.find_instrument(qubit).ro_acq_threshold() + for qubit in qubits] + a = ma2.Multiplexed_Readout_Analysis( + label=label, + nr_qubits=len(qubits), + post_selection=True, + post_selec_thresholds=thresholds) + # Print fraction of discarded shots + # Dict = a.proc_data_dict['Post_selected_shots'] + # key = next(iter(Dict)) + # fraction=0 + # for comb in Dict[key].keys(): + # fraction += len(Dict[key][comb])/(2**12 * 4) + # print('Fraction of discarded results was {:.2f}'.format(1-fraction)) + else: + a = ma2.Multiplexed_Readout_Analysis( + label=label, + nr_qubits=len(qubits)) + # Set thresholds + for i, qubit in enumerate(qubits): + label = a.Channels[i] + threshold = a.qoi[label]['threshold_raw'] + self.find_instrument(qubit).ro_acq_threshold(threshold) + return + + def measure_ssro_single_qubit( + self, + qubits: list, + q_target: str, + nr_shots: int = 2**13, # 8192 + prepare_for_timedomain: bool = True, + second_excited_state: bool = False, + result_logging_mode='raw', + initialize: bool = False, + analyze=True, 
+ shots_per_meas: int = 2**16, + nr_flux_dance:int=None, + wait_time :float=None, + label='Mux_SSRO', + MC=None): + ''' + Performs MUX single shot readout experiments of all possible + combinations of prepared states of . Outputs analysis + of a single qubit . This function is meant to + assess a particular qubit readout in the multiplexed context. + + Args: + qubits: List of qubits adressed in the mux readout. + + q_target: Qubit targeted in the analysis. + + nr_shots: number of shots for each prepared state of + q_target. That is the experiment will include + shots of the qubit prepared in the ground state + and shots of the qubit prepared in the excited + state. The remaining qubits will be prepared such that the + experiment goes through all 2**n possible combinations of + computational states. + + initialize: Include measurement post-selection by + initialization. + ''' + + log.info('{}.measure_ssro_multi_qubit for qubits{}'.format( + self.name, qubits)) + + # off and on, not including post selection init measurements yet + nr_cases = 2 ** len(qubits) # e.g., 00, 01 ,10 and 11 in the case of 2q + if second_excited_state: + nr_cases = 3 ** len(qubits) + + if initialize == True: + nr_shots = 4 * nr_shots + else: + nr_shots = 2 * nr_shots + + if prepare_for_timedomain: + self.prepare_for_timedomain(qubits) + if MC is None: + MC = self.instr_MC.get_instr() + + qubit_idxs = [self.find_instrument(qn).cfg_qubit_nr() + for qn in qubits] + + p = mqo.multi_qubit_off_on(qubit_idxs, + initialize=initialize, + nr_flux_dance=nr_flux_dance, + wait_time = wait_time, + second_excited_state=second_excited_state, + platf_cfg=self.cfg_openql_platform_fn()) + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr()) + + # right is LSQ + d = self.get_int_logging_detector(qubits, + result_logging_mode=result_logging_mode) + + # This assumes qubit names do not contain spaces + det_qubits = [v.split()[-1] for v in d.value_names] + if (qubits != det_qubits) and 
(self.ro_acq_weight_type() == 'optimal'): + # this occurs because the detector groups qubits per feedline. + # If you do not pay attention, this will mess up the analysis of + # this experiment. + raise ValueError('Detector qubits do not match order specified.{} vs {}'.format(qubits, det_qubits)) + + shots_per_meas = int(np.floor( + np.min([shots_per_meas, nr_shots])/nr_cases)*nr_cases) + + d.set_child_attr('nr_shots', shots_per_meas) + + old_soft_avg = MC.soft_avg() + old_live_plot_enabled = MC.live_plot_enabled() + MC.soft_avg(1) + MC.live_plot_enabled(False) + + MC.set_sweep_function(s) + MC.set_sweep_points(np.arange(nr_shots)) + MC.set_detector_function(d) + MC.run('{}_{}_{}'.format(label, q_target, self.msmt_suffix)) + + MC.soft_avg(old_soft_avg) + MC.live_plot_enabled(old_live_plot_enabled) + + if analyze: + if initialize == True: + thresholds = [self.find_instrument(qubit).ro_acq_threshold() \ + for qubit in qubits] + a = ma2.Multiplexed_Readout_Analysis(label=label, + nr_qubits = len(qubits), + q_target = q_target, + post_selection=True, + post_selec_thresholds=thresholds) + # Print fraction of discarded shots + #Dict = a.proc_data_dict['Post_selected_shots'] + #key = next(iter(Dict)) + #fraction=0 + #for comb in Dict[key].keys(): + # fraction += len(Dict[key][comb])/(2**12 * 4) + #print('Fraction of discarded results was {:.2f}'.format(1-fraction)) + else: + a = ma2.Multiplexed_Readout_Analysis(label=label, + nr_qubits=len(qubits), + q_target=q_target) + q_ch = [ch for ch in a.Channels if q_target in ch.decode()][0] + # Set thresholds + for i, qubit in enumerate(qubits): + label = a.raw_data_dict['value_names'][i] + threshold = a.qoi[label]['threshold_raw'] + self.find_instrument(qubit).ro_acq_threshold(threshold) + return a.qoi[q_ch] + + def measure_MUX_SSRO(self, + qubits: list, + f_state: bool = False, + nr_shots_per_case: int = 2**13, + heralded_init: bool = False, + prepare_for_timedomain: bool = True, + analyze: bool = True, + disable_metadata: bool 
= False): + ''' + Measures single shot readout multiplexed assignment fidelity matrix. + Supports second excited state as well! + ''' + assert self.ro_acq_digitized() == False, 'Analog readout required' + assert 'IQ' in self.ro_acq_weight_type(), 'IQ readout is required!' + MC = self.instr_MC.get_instr() + # Configure lutmap + for qubit in qubits: + qb = self.find_instrument(qubit) + mwl = qb.instr_LutMan_MW.get_instr() + mwl.set_default_lutmap() + if prepare_for_timedomain: + self.prepare_for_timedomain(qubits = qubits) + + # get qubit idx + Q_idxs = [self.find_instrument(q).cfg_qubit_nr() for q in qubits] + # Set UHF number of shots + _cycle = (2**len(qubits)) + _states = ['0', '1'] + if f_state: + _cycle = (3**len(qubits)) + _states = ['0', '1', '2'] + nr_shots = _cycle*nr_shots_per_case + if heralded_init: + nr_shots *= 2 + uhfqc_max_shots = 2**20 + if nr_shots < uhfqc_max_shots: + # all shots can be acquired in a single UHF run + shots_per_run = nr_shots + else: + # Number of UHF acquisition runs + nr_runs = ceil(nr_shots/uhfqc_max_shots) + shots_per_run = int((nr_shots/nr_runs)/_cycle)*_cycle + nr_shots = nr_runs*shots_per_run + # Compile sequence + p = mqo.MUX_RO_sequence( + qubit_idxs = Q_idxs, + heralded_init = heralded_init, + states = _states, + platf_cfg=self.cfg_openql_platform_fn()) + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr(), + parameter_name='Shot', unit='#', + upload=True) + MC.soft_avg(1) + MC.set_sweep_function(s) + MC.set_sweep_points(np.arange(nr_shots)) + d = self.get_int_logging_detector(qubits=qubits) + for det in d.detectors: + det.nr_shots = shots_per_run + MC.set_detector_function(d) + MC.live_plot_enabled(False) + label = f'MUX_SSRO_{"_".join(qubits)}' + MC.run(label+self.msmt_suffix, disable_snapshot_metadata=disable_metadata) + # MC.live_plot_enabled(True) + # Analysis + if analyze: + ma2.ra.Multiplexed_Readout_Analysis( + qubits=qubits, + f_state=f_state, + heralded_init=heralded_init, + label=label) + + 
def measure_transients(self, + qubits: list, + q_target: str, + cases: list = ['off', 'on'], + MC=None, + prepare_for_timedomain: bool = True, + analyze: bool = True): + ''' + Documentation. + ''' + if q_target not in qubits: + raise ValueError("q_target must be included in qubits.") + # Ensure all qubits use same acquisition instrument + instruments = [self.find_instrument(q).instr_acquisition() for q in qubits] + if instruments[1:] != instruments[:-1]: + raise ValueError("All qubits must have common acquisition instrument") + + qubits_nr = [self.find_instrument(q).cfg_qubit_nr() for q in qubits] + q_target_nr = self.find_instrument(q_target).cfg_qubit_nr() + + if MC is None: + MC = self.instr_MC.get_instr() + if prepare_for_timedomain: + self.prepare_for_timedomain(qubits) + + p = mqo.targeted_off_on( + qubits=qubits_nr, + q_target=q_target_nr, + pulse_comb='on', + platf_cfg=self.cfg_openql_platform_fn() + ) + + analysis = [None for case in cases] + for i, pulse_comb in enumerate(cases): + if 'off' in pulse_comb.lower(): + self.find_instrument(q_target).instr_LO_mw.get_instr().off() + elif 'on' in pulse_comb.lower(): + self.find_instrument(q_target).instr_LO_mw.get_instr().on() + else: + raise ValueError( + "pulse_comb {} not understood: Only 'on' and 'off' allowed.". 
+ format(pulse_comb)) + + s = swf.OpenQL_Sweep(openql_program=p, + parameter_name='Transient time', unit='s', + CCL=self.instr_CC.get_instr()) + + if 'UHFQC' in instruments[0]: + sampling_rate = 1.8e9 + else: + raise NotImplementedError() + nr_samples = self.ro_acq_integration_length()*sampling_rate + + d = det.UHFQC_input_average_detector( + UHFQC=self.find_instrument(instruments[0]), + AWG=self.instr_CC.get_instr(), + nr_averages=self.ro_acq_averages(), + nr_samples=int(nr_samples)) + + MC.set_sweep_function(s) + MC.set_sweep_points(np.arange(nr_samples)/sampling_rate) + MC.set_detector_function(d) + MC.run('Mux_transients_{}_{}_{}'.format(q_target, pulse_comb, + self.msmt_suffix)) + if analyze: + analysis[i] = ma2.Multiplexed_Transient_Analysis( + q_target='{}_{}'.format(q_target, pulse_comb)) + return analysis + + def calibrate_optimal_weights_mux(self, + qubits: list, + q_target: str, + update=True, + verify=True, + averages=2**15, + return_analysis=True + ): + + """ + Measures the multiplexed readout transients of for + in ground and excited state. After that, it calculates optimal + integration weights that are used to weigh measuremet traces to maximize + the SNR. + + Args: + qubits (list): + List of strings specifying qubits included in the multiplexed + readout signal. 
+ q_target (str): + () + verify (bool): + indicates whether to run measure_ssro at the end of the routine + to find the new SNR and readout fidelities with optimized weights + update (bool): + specifies whether to update the weights in the qubit object + """ + if q_target not in qubits: + raise ValueError("q_target must be included in qubits.") + + # Ensure that enough averages are used to get accurate weights + old_avg = self.ro_acq_averages() + self.ro_acq_averages(averages) + + Q_target = self.find_instrument(q_target) + # Transient analysis + A = self.measure_transients(qubits=qubits, q_target=q_target, + cases=['on', 'off']) + #return parameters + self.ro_acq_averages(old_avg) + + # Optimal weights + B = ma2.Multiplexed_Weights_Analysis(q_target=q_target, + IF=Q_target.ro_freq_mod(), + pulse_duration=Q_target.ro_pulse_length(), + A_ground=A[1], A_excited=A[0]) + + if update: + Q_target.ro_acq_weight_func_I(B.qoi['W_I']) + Q_target.ro_acq_weight_func_Q(B.qoi['W_Q']) + Q_target.ro_acq_weight_type('optimal') + + if verify: + Q_target._prep_ro_integration_weights() + Q_target._prep_ro_instantiate_detectors() + ssro_dict= self.measure_ssro_single_qubit(qubits=qubits, + q_target=q_target) + if return_analysis: + return ssro_dict + else: + return True + + def measure_msmt_induced_dephasing( + self, + meas_qubits: list, + target_qubits: list, + measurement_time_ns: int, + echo_times: list = None, + echo_phases: list = None, + disable_metadata=False, + prepare_for_timedomain: bool=True): + + assert self.ro_acq_digitized() == False + assert self.ro_acq_weight_type() == 'optimal' + ################### + # setup qubit idxs + ################### + all_qubits = meas_qubits+target_qubits + meas_idxs = [ self.find_instrument(q).cfg_qubit_nr() for q in meas_qubits ] + target_idxs = [ self.find_instrument(q).cfg_qubit_nr() for q in target_qubits ] + ########################################### + # RO preparation (assign res_combinations) + 
########################################### + RO_lms = np.unique([self.find_instrument(q).instr_LutMan_RO() for q in all_qubits]) + qubit_RO_lm = { self.find_instrument(q).cfg_qubit_nr() : + (self.find_instrument(q).name, + self.find_instrument(q).instr_LutMan_RO()) for q in all_qubits } + main_qubits = [] # qubits that belong to RO lm where there is an ancilla + exception_qubits = [] # qubits that belong to RO lm without ancilla + res_combs = {} + for lm in RO_lms: + res_combs[lm] = [] + comb1 = [] # used for target+meas_qubits + comb2 = [] # used for only target qubits + comb3 = [] # used for only meas qubits + targ_q_in_lm = [] + meas_q_in_lm = [] + # Sort resontator combinations + for idx in meas_idxs+target_idxs: + if qubit_RO_lm[idx][1] == lm: + comb1 += [idx] + comb2 += [idx] + res_combs[lm] += [comb1] + for idx in meas_idxs: + if qubit_RO_lm[idx][1] == lm: + comb2.remove(idx) + comb3 += [idx] + if comb2 != comb1: + res_combs[lm] += [comb2] + if len(comb3) != 0: + res_combs[lm] += [comb3] + # Sort main and exception qubits + for idx in meas_idxs+target_idxs: + if qubit_RO_lm[idx][1] == lm: + if qubit_RO_lm[idx][0] in target_qubits: + targ_q_in_lm.append(qubit_RO_lm[idx][0]) + if qubit_RO_lm[idx][0] in meas_qubits: + meas_q_in_lm.append(qubit_RO_lm[idx][0]) + if len(meas_q_in_lm) == 0: + exception_qubits += targ_q_in_lm + else: + main_qubits += meas_q_in_lm + main_qubits += targ_q_in_lm + # Time-domain preparation + ordered_qubits = main_qubits+exception_qubits + if prepare_for_timedomain: + self.prepare_for_timedomain(ordered_qubits, bypass_flux=True) + for lm in RO_lms: + ro_lm = self.find_instrument(lm) + ro_lm.resonator_combinations(res_combs[lm]) + ro_lm.load_DIO_triggered_sequence_onto_UHFQC() + for i, q in enumerate(target_qubits): + mw_lm = self.find_instrument(f'MW_lutman_{q}') + if echo_times != None: + assert echo_phases != None + print(f'Echo phase upload on {mw_lm.name}') + mw_lm.LutMap()[30] = {'name': 'rEcho', 'theta': 180, + 'phi': 
echo_phases[i], 'type': 'ge'} + mw_lm.LutMap()[27] = {"name": "rXm180", "theta": -180, + "phi": 0, "type": "ge"} + mw_lm.LutMap()[28] = {"name": "rYm180", "theta": -180, + "phi": 90, "type": "ge"} + mw_lm.load_phase_pulses_to_AWG_lookuptable() + if exception_qubits != []: + exception_idxs = [self.find_instrument(q).cfg_qubit_nr() + for q in exception_qubits] + else: + exception_idxs = None + + p = mqo.Msmt_induced_dephasing_ramsey( + q_rams = target_idxs, + q_meas = meas_idxs, + echo_times = echo_times, + meas_time = measurement_time_ns, + exception_qubits=exception_idxs, + platf_cfg=self.cfg_openql_platform_fn()) + + d = self.get_int_avg_det(qubits=ordered_qubits) + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr()) + MC = self.instr_MC.get_instr() + MC.soft_avg(1) + # MC.live_plot_enabled(True) + MC.set_sweep_function(s) + sw_pts = np.concatenate((np.repeat(np.arange(0, 360, 20), 6), + np.array([360, 361, 362, 364]))) + MC.set_sweep_points(sw_pts) + MC.set_detector_function(d) + if isinstance(echo_times, str): + echo_seq = echo_times + elif isinstance(echo_times, type(None)): + echo_seq = 'None' + elif isinstance(echo_times, list): + echo_seq = 'single_echo' + MC.run(f'Msmt_induced_dephasing_echo_seq_{echo_seq}', + disable_snapshot_metadata=disable_metadata) + a = ma2.mra.measurement_dephasing_analysis( + meas_time=measurement_time_ns*1e-9, + target_qubits= target_qubits, + exception_qubits=exception_qubits) + + def measure_chevron( + self, + q0: str, + q_spec: str, + q_parks=None, + amps=np.arange(0, 1, 0.05), + lengths=np.arange(5e-9, 51e-9, 5e-9), + adaptive_sampling=False, + adaptive_sampling_pts=None, + adaptive_pars: dict = None, + prepare_for_timedomain=True, + MC=None, + freq_tone=6e9, + pow_tone=-10, + spec_tone=False, + buffer_time=0, + target_qubit_sequence: str = "ramsey", + waveform_name="square", + recover_q_spec: bool = False, + second_excited_state: bool = False, + disable_metadata: bool = False, + ): + """ + Measure a chevron 
patter of esulting from swapping of the excitations + of the two qubits. Qubit q0 is prepared in 1 state and flux-pulsed + close to the interaction zone using (usually) a rectangular pulse. + Meanwhile q1 is prepared in 0, 1 or superposition state. If it is in 0 + state flipping between 01-10 can be observed. It if is in 1 state flipping + between 11-20 as well as 11-02 show up. In superpostion everything is visible. + + Args: + q0 (str): + flux-pulsed qubit (prepared in 1 state at the beginning) + q_spec (str): + stationary qubit (in 0, 1 or superposition) + q_parks (list): + qubits to move out of the interaction zone by applying a + square flux pulse. Note that this is optional. Not specifying + this means no extra pulses are applied. + Note that this qubit is not read out. + + amps (array): + amplitudes of the applied flux pulse controlled via the amplitude + of the correspnding AWG channel + + lengths (array): + durations of the applied flux pulses + + adaptive_sampling (bool): + indicates whether to adaptivelly probe + values of ampitude and duration, with points more dense where + the data has more fine features + + adaptive_sampling_pts (int): + number of points to measur in the adaptive_sampling mode + + prepare_for_timedomain (bool): + should all instruments be reconfigured to + time domain measurements + + target_qubit_sequence (str {"ground", "extited", "ramsey"}): + specifies whether the spectator qubit should be + prepared in the 0 state ('ground'), 1 state ('extited') or + in superposition ('ramsey') + + spec_tone (bool): + uses the spectroscopy source (in CW mode) of the qubit to produce + a fake chevron. + + freq_tone (float): + When spec_tone = True, controls the frequency of the spec source + + pow_tone (float): + When spec_tone = True, controls the power of the spec source + + recover_q_spec (bool): + applies the first gate of qspec at the end as well if `True` + + second_excited_state (bool): + Applies f12 transition pulse before flux pulse. 
+ + Circuit: + q0 -x180-flux-x180-RO- + qspec --x90-----(x90)-RO- (target_qubit_sequence='ramsey') + + q0 -x180-flux-x180-RO- + qspec -x180----(x180)-RO- (target_qubit_sequence='excited') + + q0 -x180-flux-x180-RO- + qspec ----------------RO- (target_qubit_sequence='ground') + """ + if MC is None: + MC = self.instr_MC.get_instr() + + assert q0 in self.qubits() + assert q_spec in self.qubits() + + q0idx = self.find_instrument(q0).cfg_qubit_nr() + q_specidx = self.find_instrument(q_spec).cfg_qubit_nr() + if q_parks is not None: + q_park_idxs = [self.find_instrument(q_park).cfg_qubit_nr() for q_park in q_parks] + for q_park in q_parks: + q_park_idx = self.find_instrument(q_park).cfg_qubit_nr() + fl_lutman_park = self.find_instrument(q_park).instr_LutMan_Flux.get_instr() + if fl_lutman_park.park_amp() < 0.1: + # This can cause weird behaviour if not paid attention to. + log.warning("Square amp for park pulse < 0.1") + if fl_lutman_park.park_length() < np.max(lengths): + log.warning("Square length shorter than max Chevron length") + else: + q_park_idxs = None + + fl_lutman = self.find_instrument(q0).instr_LutMan_Flux.get_instr() + fl_lutman_spec = self.find_instrument(q_spec).instr_LutMan_Flux.get_instr() + + if waveform_name == "square": + length_par = fl_lutman.sq_length + flux_cw = 6 + elif "cz" in waveform_name: + length_par = fl_lutman.cz_length + flux_cw = fl_lutman._get_cw_from_wf_name(waveform_name) + else: + raise ValueError("Waveform shape not understood") + + if prepare_for_timedomain: + self.prepare_for_timedomain(qubits=[q0, q_spec]) + + awg = fl_lutman.AWG.get_instr() + using_QWG = isinstance(awg, QuTech_AWG_Module) + if using_QWG: + awg_ch = fl_lutman.cfg_awg_channel() + amp_par = awg.parameters["ch{}_amp".format(awg_ch)] + else: + awg_ch = ( + fl_lutman.cfg_awg_channel() - 1 + ) # -1 is to account for starting at 1 + ch_pair = awg_ch % 2 + awg_nr = awg_ch // 2 + + amp_par = awg.parameters[ + "awgs_{}_outputs_{}_amplitude".format(awg_nr, ch_pair) + ] + + 
sw = swf.FLsweep(fl_lutman, length_par, waveform_name=waveform_name) + + p = mqo.Chevron( + q0idx, + q_specidx, + q_park_idxs, + buffer_time=buffer_time, + buffer_time2=buffer_time, + flux_cw=flux_cw, + platf_cfg=self.cfg_openql_platform_fn(), + target_qubit_sequence=target_qubit_sequence, + cc=self.instr_CC.get_instr().name, + recover_q_spec=recover_q_spec, + second_excited_state=second_excited_state, + ) + self.instr_CC.get_instr().eqasm_program(p.filename) + self.instr_CC.get_instr().start() + + d = self.get_correlation_detector( + qubits=[q0, q_spec], + single_int_avg=True, + seg_per_point=1, + always_prepare=True, + ) + # d = self.get_int_avg_det(qubits=[q0, q_spec]) + # d = self.int_log_det + MC.set_sweep_function(amp_par) + MC.set_sweep_function_2D(sw) + MC.set_detector_function(d) + + prepared_state_label: str = "_prep_state_f" if second_excited_state else "_prep_state_e" + label = f"Chevron{prepared_state_label} {q0} {q_spec} {target_qubit_sequence}" + + if not adaptive_sampling: + MC.set_sweep_points(amps) + MC.set_sweep_points_2D(lengths) + MC.run(label, mode="2D", + disable_snapshot_metadata=disable_metadata) + ma.TwoD_Analysis() + else: + if adaptive_pars is None: + adaptive_pars = { + "adaptive_function": adaptive.Learner2D, + "goal": lambda l: l.npoints > adaptive_sampling_pts, + "bounds": (amps, lengths), + } + MC.set_adaptive_function_parameters(adaptive_pars) + MC.run(label + " adaptive", mode="adaptive") + ma2.Basic2DInterpolatedAnalysis() + + def measure_cryoscope( + self, + qubits, + times, + MC=None, + nested_MC=None, + double_projections: bool = False, + wait_time_flux: int = 0, + update_FIRs: bool=False, + update_IIRs: bool=False, + waveform_name: str = "square", + max_delay=None, + twoq_pair=[2, 0], + disable_metadata: bool = False, + init_buffer=0, + analyze: bool = True, + prepare_for_timedomain: bool = True, + ): + """ + Performs a cryoscope experiment to measure the shape of a flux pulse. 
+ + Args: + qubits (list): + a list of two target qubits + + times (array): + array of measurment times + + label (str): + used to label the experiment + + waveform_name (str {"square", "custom_wf"}) : + defines the name of the waveform used in the + cryoscope. Valid values are either "square" or "custom_wf" + + max_delay {float, "auto"} : + determines the delay in the pulse sequence + if set to "auto" this is automatically set to the largest + pulse duration for the cryoscope. + + prepare_for_timedomain (bool): + calls self.prepare_for_timedomain on start + """ + assert self.ro_acq_weight_type() == 'optimal' + assert not (update_FIRs and update_IIRs), 'Can only either update IIRs or FIRs' + if update_FIRs or update_IIRs: + assert analyze==True, 'Analsis has to run for filter update' + if MC is None: + MC = self.instr_MC.get_instr() + if nested_MC is None: + nested_MC = self.instr_nested_MC.get_instr() + for q in qubits: + assert q in self.qubits() + Q_idxs = [self.find_instrument(q).cfg_qubit_nr() for q in qubits] + if prepare_for_timedomain: + self.prepare_for_timedomain(qubits=qubits) + if max_delay is None: + max_delay = 0 + else: + max_delay = np.max(times) + 40e-9 + Fl_lutmans = [self.find_instrument(q).instr_LutMan_Flux.get_instr() \ + for q in qubits] + if waveform_name == "square": + Sw_functions = [swf.FLsweep(lutman, lutman.sq_length, + waveform_name="square") for lutman in Fl_lutmans] + swfs = swf.multi_sweep_function(Sw_functions) + flux_cw = "sf_square" + elif waveform_name == "custom_wf": + Sw_functions = [swf.FLsweep(lutman, lutman.custom_wf_length, + waveform_name="custom_wf") for lutman in Fl_lutmans] + swfs = swf.multi_sweep_function(Sw_functions) + flux_cw = "sf_custom_wf" + else: + raise ValueError( + 'waveform_name "{}" should be either ' + '"square" or "custom_wf"'.format(waveform_name) + ) + + p = mqo.Cryoscope( + qubit_idxs=Q_idxs, + flux_cw=flux_cw, + twoq_pair=twoq_pair, + wait_time_flux=wait_time_flux, + 
platf_cfg=self.cfg_openql_platform_fn(), + cc=self.instr_CC.get_instr().name, + double_projections=double_projections, + ) + self.instr_CC.get_instr().eqasm_program(p.filename) + self.instr_CC.get_instr().start() + + MC.set_sweep_function(swfs) + MC.set_sweep_points(times) + + if double_projections: + # Cryoscope v2 + values_per_point = 4 + values_per_point_suffex = ["cos", "sin", "mcos", "msin"] + else: + # Cryoscope v1 + values_per_point = 2 + values_per_point_suffex = ["cos", "sin"] + + d = self.get_int_avg_det( + qubits=qubits, + values_per_point=values_per_point, + values_per_point_suffex=values_per_point_suffex, + single_int_avg=True, + always_prepare=True + ) + MC.set_detector_function(d) + label = 'Cryoscope_{}_amps'.format('_'.join(qubits)) + MC.run(label,disable_snapshot_metadata=disable_metadata) + # Run analysis + if analyze: + a = ma2.cv2.multi_qubit_cryoscope_analysis( + label='Cryoscope', + update_IIRs=update_IIRs, + update_FIRs=update_FIRs) + if update_FIRs: + for qubit, fltr in a.proc_data_dict['conv_filters'].items(): + lin_dist_kern = self.find_instrument(f'lin_dist_kern_{qubit}') + filter_dict = {'params': {'weights': fltr}, + 'model': 'FIR', 'real-time': True } + lin_dist_kern.filter_model_04(filter_dict) + elif update_IIRs: + for qubit, fltr in a.proc_data_dict['exponential_filter'].items(): + lin_dist_kern = self.find_instrument(f'lin_dist_kern_{qubit}') + filter_dict = {'params': fltr, + 'model': 'exponential', 'real-time': True } + if fltr['amp'] > 0: + print('Amplitude of filter is positive (overfitting).') + print('Filter not updated.') + return True + else: + # Check wich is the first empty exponential filter + for i in range(4): + _fltr = lin_dist_kern.get(f'filter_model_0{i}') + if _fltr == {}: + lin_dist_kern.set(f'filter_model_0{i}', filter_dict) + return True + else: + print(f'filter_model_0{i} used.') + print('All exponential filter tabs are full. 
Filter not updated.') + return True + + def measure_cryoscope_long( + self, + qubit: str, + times: list, + frequencies: list, + MC = None, + nested_MC = None, + analyze: bool = True, + update_IIRs: bool = False, + prepare_for_timedomain: bool = True, + ): + """ + Performs a cryoscope experiment to measure the shape of a flux pulse. + This long version of cryoscope, uses a spectroscopy type experiment to + probe the frequency of the qubit for pulses longer than those allowed + by conventional ramsey measurements: + t __ _____________ + MW : |<----->_/ \_ | | + ________________________ | Measurement | + Flux: _| |_ |_____________| + + Args: + qubit (list): + target qubit + + times (array): + array of measurment times + + prepare_for_timedomain (bool): + calls self.prepare_for_timedomain on start + """ + assert self.ro_acq_weight_type() == 'optimal' + assert self.ro_acq_digitized() == True + if update_IIRs: + assert analyze + if MC is None: + MC = self.instr_MC.get_instr() + if nested_MC is None: + nested_MC = self.instr_nested_MC.get_instr() + # Setup sweep times of experiment + max_length = np.max(times) + 200e-9 + Times_ns = times*1e9 + # Get instruments + Q_inst = self.find_instrument(qubit) + flux_lm = Q_inst.instr_LutMan_Flux.get_instr() + HDAWG_inst = flux_lm.AWG.get_instr() + # Employ dedicated HDAWG instrument class for long-cyroscope experiment + HDAWG_inst: ZI_HDAWG8_LongCryoscope = ZI_HDAWG8_LongCryoscope.from_other_instance(HDAWG_inst) + MW_LO_inst = Q_inst.instr_LO_mw.get_instr() + # Save previous operating parameters + LO_frequency = MW_LO_inst.frequency() + mw_gauss_width = Q_inst.mw_gauss_width() + mw_channel_amp = Q_inst.mw_channel_amp() + cfg_max_length = flux_lm.cfg_max_wf_length() + # For spectroscopy, we'll use a 80 ns qubit pi pulse. 
+ # (we increase the duration of the pulse to probe + # a narrower frequency spectrum) + Q_inst.mw_gauss_width(20e-9) + Q_inst.mw_channel_amp(mw_channel_amp/2*1.3) + # Prepare for the experiment: + if prepare_for_timedomain: + # Prepare flux pulse + flux_lm.sq_length(max_length) + flux_lm.cfg_max_wf_length(max_length) + # Change LutMap accordingly to only upload one waveform + flux_lm.LutMap({0: {'name': 'i', 'type': 'idle'}, # idle always required + 1: {'name': 'square', 'type': 'square'}}) + try: + # Load flux waveform + flux_lm.AWG.get_instr().stop() + flux_lm.load_waveform_onto_AWG_lookuptable(regenerate_waveforms=True, wave_id='square') + flux_lm.cfg_awg_channel_amplitude() + flux_lm.cfg_awg_channel_range() + flux_lm.AWG.get_instr().start() + self.prepare_for_timedomain(qubits=[qubit]) + Q_inst.prepare_readout() + except: + print_exception() + print('Execution failed. Reseting HDAWG and flux lutman...') + # Reset old method in HDAWG + if prepare_for_timedomain: + HDAWG_inst: ZI_HDAWG8 = ZI_HDAWG8.from_other_instance(HDAWG_inst) + # Reset mw settings + MW_LO_inst.frequency(LO_frequency) + Q_inst.mw_gauss_width(mw_gauss_width) + Q_inst.mw_channel_amp(mw_channel_amp) + # Reset flux settings + flux_lm.sq_length(20e-9) + flux_lm.cfg_max_wf_length(cfg_max_length) + HDAWG_inst.reset_waveforms_zeros() + for i in range(4): + HDAWG_inst._clear_dirty_waveforms(i) + flux_lm.set_default_lutmap() + flux_lm.load_waveforms_onto_AWG_lookuptable() + # Raise error + raise RuntimeError('Preparation failed.') + # Compile experiment sequence + p = mqo.Cryoscope_long( + qubit = Q_inst.cfg_qubit_nr(), + times_ns = Times_ns, + t_total_ns=max_length*1e9, + platf_cfg = Q_inst.cfg_openql_platform_fn()) + # Sweep functions + d = Q_inst.int_avg_det + # d = self.get_int_avg_det(qubits=[qubit]) # this should be the right detector + swf1 = swf.OpenQL_Sweep( + openql_program=p, + CCL=self.instr_CC.get_instr(), + ) + swf2 = MW_LO_inst.frequency + sweep_freqs = frequencies-Q_inst.mw_freq_mod() + 
# Setup measurement control + MC.soft_avg(1) + MC.live_plot_enabled(True) + MC.set_sweep_function(swf1) + MC.set_sweep_function_2D(swf2) + MC.set_sweep_points(Times_ns) + MC.set_sweep_points_2D(sweep_freqs) + MC.set_detector_function(d) + try: + label = f"Cryoscope_long_{Q_inst.name}" + _max_length = max_length - 200e-9 + if _max_length > 1e-6: + label += f"_{_max_length*1e6:.0f}us" + else: + label += f"_{_max_length*1e9:.0f}ns" + # Analysis relies on snapshot + MC.run(label, mode='2D', disable_snapshot_metadata=False) + except: + analyze = False + print_exception() + print('Reseting HDAWG and flux lutman...') + # Reset operating parameters + MW_LO_inst.frequency(LO_frequency) + Q_inst.mw_gauss_width(mw_gauss_width) + Q_inst.mw_channel_amp(mw_channel_amp) + # Reset old method in HDAWG + if prepare_for_timedomain: + HDAWG_inst: ZI_HDAWG8 = ZI_HDAWG8.from_other_instance(HDAWG_inst) + # Reset flux settings + flux_lm.sq_length(20e-9) + flux_lm.cfg_max_wf_length(cfg_max_length) + HDAWG_inst.reset_waveforms_zeros() + for i in range(4): + HDAWG_inst._clear_dirty_waveforms(i) + flux_lm.set_default_lutmap() + flux_lm.load_waveforms_onto_AWG_lookuptable() + # Run analysis + if analyze: + a = ma2.cv2.Cryoscope_long_analysis(update_IIR=update_IIRs) + if update_IIRs: + lin_dist_kern = flux_lm.instr_distortion_kernel.get_instr() + filtr = {'params': a.proc_data_dict['exponential_filter'], + 'model': 'exponential', 'real-time': True } + # Check wich is the first empty exponential filter + for i in [0,1,2,3,5,6,7,8]: + _fltr = lin_dist_kern.get(f'filter_model_0{i}') + if _fltr == {}: + lin_dist_kern.set(f'filter_model_0{i}', filtr) + return True + else: + print(f'filter_model_0{i} used.') + print('All exponential filter tabs are full. 
Filter not updated.') + return True + + def measure_cryoscope_vs_amp( + self, + q0: str, + amps, + flux_cw: str = 'fl_cw_06', + duration: float = 100e-9, + amp_parameter: str = "channel", + MC=None, + twoq_pair=[2, 0], + label="Cryoscope", + max_delay: float = "auto", + prepare_for_timedomain: bool = True, + ): + """ + Performs a cryoscope experiment to measure the shape of a flux pulse. + + + Args: + q0 (str) : + name of the target qubit + + amps (array): + array of square pulse amplitudes + + amps_paramater (str): + The parameter through which the amplitude is changed either + {"channel", "dac"} + channel : uses the AWG channel amplitude parameter + to rescale all waveforms + dac : uploads a new waveform with a different amlitude + for each data point. + + label (str): + used to label the experiment + + waveform_name (str {"square", "custom_wf"}) : + defines the name of the waveform used in the + cryoscope. Valid values are either "square" or "custom_wf" + + max_delay {float, "auto"} : + determines the delay in the pulse sequence + if set to "auto" this is automatically set to the largest + pulse duration for the cryoscope. 
+ + prepare_for_timedomain (bool): + calls self.prepare_for_timedomain on start + """ + if MC is None: + MC = self.instr_MC.get_instr() + + assert q0 in self.qubits() + q0idx = self.find_instrument(q0).cfg_qubit_nr() + + fl_lutman = self.find_instrument(q0).instr_LutMan_Flux.get_instr() + fl_lutman.sq_length(duration) + + if prepare_for_timedomain: + self.prepare_for_timedomain(qubits=[q0]) + + if max_delay == "auto": + max_delay = duration + 40e-9 + + if amp_parameter == "channel": + sw = fl_lutman.cfg_awg_channel_amplitude + elif amp_parameter == "dac": + sw = swf.FLsweep(fl_lutman, fl_lutman.sq_amp, waveform_name="square") + else: + raise ValueError( + 'amp_parameter "{}" should be either ' + '"channel" or "dac"'.format(amp_parameter) + ) + + p = mqo.Cryoscope( + q0idx, + buffer_time1=0, + buffer_time2=max_delay, + twoq_pair=twoq_pair, + flux_cw=flux_cw, + platf_cfg=self.cfg_openql_platform_fn()) + self.instr_CC.get_instr().eqasm_program(p.filename) + self.instr_CC.get_instr().start() + + MC.set_sweep_function(sw) + MC.set_sweep_points(amps) + d = self.get_int_avg_det( + qubits=[q0], + values_per_point=2, + values_per_point_suffex=["cos", "sin"], + single_int_avg=True, + always_prepare=True, + ) + MC.set_detector_function(d) + MC.run(label) + ma2.Basic1DAnalysis() + + def measure_timing_diagram(self, qubits: list, + flux_latencies, microwave_latencies, + MC=None, + pulse_length=40e-9, flux_cw='fl_cw_06', + prepare_for_timedomain: bool = True, + run_analysis: bool = True, + ): + """ + Measure the ramsey-like sequence with the 40 ns flux pulses played between + the two pi/2. While playing this sequence the delay of flux and microwave pulses + is varied (relative to the readout pulse), looking for configuration in which + the pulses arrive at the sample in the desired order. + + After measuting the pattern use ma2.Timing_Cal_Flux_Fine with manually + chosen parameters to match the drawn line to the measured patern. 
+ + Args: + qubits (str) : + list of the target qubits + flux_latencies (array): + array of flux latencies to set (in seconds) + microwave_latencies (array): + array of microwave latencies to set (in seconds) + + label (str): + used to label the experiment + + prepare_for_timedomain (bool): + calls self.prepare_for_timedomain on start + + run_analysis (bool): + executes analysis functionality + """ + if MC is None: + MC = self.instr_MC.get_instr() + if prepare_for_timedomain: + self.prepare_for_timedomain(qubits) + + for q in qubits: + assert q in self.qubits() + + Q_idxs = [self.find_instrument(q).cfg_qubit_nr() for q in qubits] + + Fl_lutmans = [self.find_instrument(q).instr_LutMan_Flux.get_instr() \ + for q in qubits] + for lutman in Fl_lutmans: + lutman.sq_length(pulse_length) + + CC = self.instr_CC.get_instr() + + p = mqo.FluxTimingCalibration(qubit_idxs=Q_idxs, + platf_cfg=self.cfg_openql_platform_fn(), + flux_cw=flux_cw, + cal_points=True) + + CC.eqasm_program(p.filename) + + d = self.get_int_avg_det(qubits=qubits, single_int_avg=True) + MC.set_detector_function(d) + + s = swf.tim_flux_latency_sweep(self) + s2 = swf.tim_mw_latency_sweep(self) + MC.set_sweep_functions([s, s2]) + # MC.set_sweep_functions(s2) + + # MC.set_sweep_points(microwave_latencies) + MC.set_sweep_points(flux_latencies) + MC.set_sweep_points_2D(microwave_latencies) + label = 'Timing_diag_{}'.format('_'.join(qubits)) + MC.run_2D(label) + + if run_analysis: + # This is the analysis that should be run but with custom delays + ma2.Timing_Cal_Flux_Fine(ch_idx=0, close_figs=False, + ro_latency=-100e-9, + flux_latency=0, + flux_pulse_duration=10e-9, + mw_pulse_separation=80e-9) + + def measure_timing_1d_trace(self, q0, latencies, latency_type='flux', + MC=None, label='timing_{}_{}', + buffer_time=40e-9, + prepare_for_timedomain: bool = True, + mw_gate: str = "rx90", sq_length: float = 60e-9): + mmt_label = label.format(self.name, q0) + if MC is None: + MC = self.instr_MC.get_instr() + assert q0 
in self.qubits() + q0idx = self.find_instrument(q0).cfg_qubit_nr() + self.prepare_for_timedomain([q0]) + fl_lutman = self.find_instrument(q0).instr_LutMan_Flux.get_instr() + fl_lutman.sq_length(sq_length) + CC = self.instr_CC.get_instr() + + # Wait 40 results in a mw separation of flux_pulse_duration+40ns = 120ns + p = sqo.FluxTimingCalibration(q0idx, + times=[buffer_time], + platf_cfg=self.cfg_openql_platform_fn(), + flux_cw='fl_cw_06', + cal_points=False, + mw_gate=mw_gate) + CC.eqasm_program(p.filename) + + d = self.get_int_avg_det(qubits=[q0], single_int_avg=True) + MC.set_detector_function(d) + + if latency_type == 'flux': + s = swf.tim_flux_latency_sweep(self) + elif latency_type == 'mw': + s = swf.tim_mw_latency_sweep(self) + else: + raise ValueError('Latency type {} not understood.'.format(latency_type)) + MC.set_sweep_function(s) + MC.set_sweep_points(latencies) + MC.run(mmt_label) + + a_obj = ma2.Basic1DAnalysis(label=mmt_label) + return a_obj + + + def measure_timing_1d_trace_vs_amplitude(self, q0, latencies: np.ndarray, amplitudes: Optional[np.ndarray] = None, latency_type='flux', + MC=None, label='timing_vs_amplitude_{}_{}', + buffer_time=40e-9, + prepare_for_timedomain: bool = True, + mw_gate: str = "rx90", sq_length: float = 60e-9): + mmt_label = label.format(self.name, q0) + if MC is None: + MC = self.instr_MC.get_instr() + assert q0 in self.qubits() + qubit_obj: TransmonObj = self.find_instrument(q0) + q0idx = qubit_obj.cfg_qubit_nr() + self.prepare_for_timedomain([q0]) + fl_lutman = qubit_obj.instr_LutMan_Flux.get_instr() + fl_lutman.sq_length(sq_length) + CC = self.instr_CC.get_instr() + mw_lutman = qubit_obj.instr_LutMan_MW.get_instr() + + mw_channel_amplitude: float = qubit_obj.mw_channel_amp() + if amplitudes is None: + amplitude_span: float = 0.4 # Needs to be less than 1.0 + span_lower_bound: float = mw_channel_amplitude - 0.5 * amplitude_span + span_upper_bound: float = mw_channel_amplitude + 0.5 * amplitude_span + amplitudes: np.ndarray = 
np.linspace( + span_lower_bound, + span_upper_bound, + 10, + ) + allowed_lower_limit: float = 0.0 + allowed_upper_limit: float = 1.0 + upper_limit_correction: float = min(0.0, allowed_upper_limit - span_upper_bound) + lower_limit_correction: float = max(0.0, allowed_lower_limit - span_lower_bound) + amplitudes = amplitudes + upper_limit_correction + lower_limit_correction + print(mw_channel_amplitude, min(amplitudes), max(amplitudes)) + + # Wait 40 results in a mw separation of flux_pulse_duration+40ns = 120ns + p = sqo.FluxTimingCalibration(q0idx, + times=[buffer_time], + platf_cfg=self.cfg_openql_platform_fn(), + flux_cw='fl_cw_06', + cal_points=False, + mw_gate=mw_gate) + CC.eqasm_program(p.filename) + + d = self.get_int_avg_det(qubits=[q0], single_int_avg=True) + MC.set_detector_function(d) + + if latency_type == 'flux': + s = swf.tim_flux_latency_sweep(self) + elif latency_type == 'mw': + s = swf.tim_mw_latency_sweep(self) + else: + raise ValueError('Latency type {} not understood.'.format(latency_type)) + s2 = mw_lutman.channel_amp + + MC.set_sweep_functions([s2, s]) + MC.set_sweep_points(amplitudes) + MC.set_sweep_points_2D(latencies) + try: + MC.run_2D(mmt_label) + except Exception as e: + mw_lutman.channel_amp(mw_channel_amplitude) + raise e + + a_obj = ma2.Basic2DAnalysis(label=mmt_label) + return a_obj + + def measure_two_qubit_randomized_benchmarking( + self, + qubits, + nr_cliffords=np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0, + 12.0, 15.0, 20.0, 25.0, 30.0, 50.0]), + nr_seeds=100, + interleaving_cliffords=[None], + label="TwoQubit_RB_{}seeds_recompile={}_icl{}_{}_{}_{}", + recompile: bool = "as needed", + cal_points=True, + flux_codeword="cz", + flux_allocated_duration_ns: int = None, + sim_cz_qubits: list = None, + compile_only: bool = False, + prepare_for_timedomain: bool = True, + pool=None, # a multiprocessing.Pool() + rb_tasks=None, # used after called with `compile_only=True` + MC=None + ): + """ + Measures two qubit randomized 
benchmarking, including + the leakage estimate. + + [2020-07-04 Victor] this method was updated to allow for parallel + compilation using all the cores of the measurement computer + + Refs: + Knill PRA 77, 012307 (2008) + Wood PRA 97, 032306 (2018) + + Args: + qubits (list): + pair of the qubit names on which to perform RB + + nr_cliffords (array): + lengths of the clifford sequences to perform + + nr_seeds (int): + number of different clifford sequences of each length + + interleaving_cliffords (list): + list of integers (or None) which specifies which cliffords + to interleave the sequence with (for interleaved RB) + For indices of Clifford group elements go to + two_qubit_clifford_group.py + + label (str): + string for formatting the measurement name + + recompile (bool, str {'as needed'}): + indicate whether to regenerate the sequences of clifford gates. + By default it checks whether the needed sequences were already + generated since the most recent change of OpenQL file + specified in self.cfg_openql_platform_fn + + cal_points (bool): + should calibration point (qubits in 0 and 1 states) + be included in the measurement + + flux_codeword (str): + flux codeword corresponding to the Cphase gate + + sim_cz_qubits (list): + A list of qubit names on which a simultaneous cz + instruction must be applied. This is for characterizing + CZ gates that are intended to be performed in parallel + with other CZ gates. + + flux_allocated_duration_ns (list): + Duration in ns of the flux pulse used when interleaved gate is + [100_000], i.e. 
idle identity + + compilation_only (bool): + Compile only the RB sequences without measuring, intended for + parallelizing iRB sequences compilation with measurements + + pool (multiprocessing.Pool): + Only relevant for `compilation_only=True` + Pool to which the compilation tasks will be assigned + + rb_tasks (list): + Only relevant when running `compilation_only=True` previously, + saving the rb_tasks, waiting for them to finish then running + this method again and providing the `rb_tasks`. + See the interleaved RB for use case. + """ + if MC is None: + MC = self.instr_MC.get_instr() + + old_weight_type = self.ro_acq_weight_type() + old_digitized = self.ro_acq_digitized() + old_avg = self.ro_acq_averages() + # Settings that have to be preserved, change is required for + if prepare_for_timedomain: + # 2-state readout and postprocessing + self.ro_acq_weight_type("optimal IQ") + self.ro_acq_digitized(False) + for q in qubits: + q_instr = self.find_instrument(q) + mw_lutman = q_instr.instr_LutMan_MW.get_instr() + # mw_lutman.load_ef_rabi_pulses_to_AWG_lookuptable() + mw_lutman.set_default_lutmap() + self.prepare_for_timedomain(qubits=qubits) + MC.soft_avg(1) # FIXME: changes state + # The detector needs to be defined before setting back parameters + d = self.get_int_logging_detector(qubits=qubits) + # set back the settings + self.ro_acq_weight_type(old_weight_type) + self.ro_acq_digitized(old_digitized) + + MC.soft_avg(1) + + qubit_idxs = [self.find_instrument(q).cfg_qubit_nr() for q in qubits] + if sim_cz_qubits is not None: + sim_cz_qubits_idxs = [ + self.find_instrument(q).cfg_qubit_nr() for q in sim_cz_qubits + ] + else: + sim_cz_qubits_idxs = None + + net_cliffords = [0, 3 * 24 + 3] + + def send_rb_tasks(pool_): + tasks_inputs = [] + for i in range(nr_seeds): + task_dict = dict( + qubits=qubit_idxs, + nr_cliffords=nr_cliffords, + nr_seeds=1, + flux_codeword=flux_codeword, + flux_allocated_duration_ns=flux_allocated_duration_ns, + 
platf_cfg=self.cfg_openql_platform_fn(), + program_name="TwoQ_RB_int_cl_s{}_ncl{}_icl{}_{}_{}".format( + int(i), + list(map(int, nr_cliffords)), + interleaving_cliffords, + qubits[0], + qubits[1], + ), + interleaving_cliffords=interleaving_cliffords, + cal_points=cal_points, + net_cliffords=net_cliffords, # measures with and without inverting + f_state_cal_pts=True, + recompile=recompile, + sim_cz_qubits=sim_cz_qubits_idxs, + ) + tasks_inputs.append(task_dict) + + rb_tasks = pool_.map_async(cl_oql.parallel_friendly_rb, tasks_inputs) + + return rb_tasks + + if compile_only: + assert pool is not None + rb_tasks = send_rb_tasks(pool) + return rb_tasks + + if rb_tasks is None: + # Using `with ...:` makes sure the other processes will be terminated + # avoid starting too mane processes, + # nr_processes = None will start as many as the PC can handle + nr_processes = None if recompile else 1 + with multiprocessing.Pool( + nr_processes, + maxtasksperchild=cl_oql.maxtasksperchild # avoid RAM issues + ) as pool: + rb_tasks = send_rb_tasks(pool) + cl_oql.wait_for_rb_tasks(rb_tasks) + + programs_filenames = rb_tasks.get() + + # to include calibration points + if cal_points: + sweep_points = np.append( + np.repeat(nr_cliffords, 2), + [nr_cliffords[-1] + 0.5] * 2 + + [nr_cliffords[-1] + 1.5] * 2 + + [nr_cliffords[-1] + 2.5] * 3, + ) + else: + sweep_points = np.repeat(nr_cliffords, 2) + + counter_param = ManualParameter("name_ctr", initial_value=0) + prepare_function_kwargs = { + "counter_param": counter_param, + "programs_filenames": programs_filenames, + "CC": self.instr_CC.get_instr(), + } + + # Using the first detector of the multi-detector as this is + # in charge of controlling the CC (see self.get_int_logging_detector) + d.set_prepare_function( + oqh.load_range_of_oql_programs_from_filenames, + prepare_function_kwargs, detectors="first" + ) + # d.nr_averages = 128 + + reps_per_seed = 4094 // len(sweep_points) + nr_shots = reps_per_seed * len(sweep_points) + 
d.set_child_attr("nr_shots", nr_shots) + + s = swf.None_Sweep(parameter_name="Number of Cliffords", unit="#") + + MC.set_sweep_function(s) + MC.set_sweep_points(np.tile(sweep_points, reps_per_seed * nr_seeds)) + + MC.set_detector_function(d) + label = label.format( + nr_seeds, + recompile, + interleaving_cliffords, + qubits[0], + qubits[1], + flux_codeword) + MC.run(label, exp_metadata={"bins": sweep_points}, + disable_snapshot_metadata=True) + # N.B. if interleaving cliffords are used, this won't work + ma2.RandomizedBenchmarking_TwoQubit_Analysis(label=label) + + def measure_two_qubit_interleaved_randomized_benchmarking( + self, + qubits: list, + nr_cliffords=np.array([1., 3., 5., 7., 9., 11., 15., + 20., 25., 30., 40., 50.]), + nr_seeds=100, + recompile: bool = "as needed", + flux_codeword="cz", + flux_allocated_duration_ns: int = None, + sim_cz_qubits: list = None, + measure_idle_flux: bool = False, + prepare_for_timedomain: bool = True, + rb_tasks_start: list = None, + pool=None, + cardinal: dict = None, + start_next_round_compilation: bool = False, + maxtasksperchild=None, + MC = None, + ): + # USED_BY: inspire_dependency_graph.py, + """ + Perform two-qubit interleaved randomized benchmarking with an + interleaved CZ gate, and optionally an interleaved idle identity with + the duration of the CZ. + + If recompile is `True` or `as needed` it will parallelize RB sequence + compilation with measurement (beside the parallelization of the RB + sequences which will always happen in parallel). 
+ """ + if MC is None: + MC = self.instr_MC.get_instr() + + def run_parallel_iRB( + recompile, pool, rb_tasks_start: list = None, + start_next_round_compilation: bool = False, + cardinal=cardinal + ): + """ + We define the full parallel iRB procedure here as function such + that we can control the flow of the parallel RB sequences + compilations from the outside of this method, and allow for + chaining RB compilations for sequential measurements intended for + taking statistics of the RB performance + """ + rb_tasks_next = None + + # 1. Start (non-blocking) compilation for [None] + # We make it non-blocking such that the non-blocking feature + # is used for the interleaved cases + if rb_tasks_start is None: + rb_tasks_start = self.measure_two_qubit_randomized_benchmarking( + qubits=qubits, + MC=MC, + nr_cliffords=nr_cliffords, + interleaving_cliffords=[None], + recompile=recompile, + prepare_for_timedomain = prepare_for_timedomain, + flux_codeword=flux_codeword, + nr_seeds=nr_seeds, + sim_cz_qubits=sim_cz_qubits, + compile_only=True, + pool=pool + ) + + # 2. Wait for [None] compilation to finish + cl_oql.wait_for_rb_tasks(rb_tasks_start) + + # 3. Start (non-blocking) compilation for [104368] + rb_tasks_CZ = self.measure_two_qubit_randomized_benchmarking( + qubits=qubits, + MC=MC, + nr_cliffords=nr_cliffords, + interleaving_cliffords=[104368], + recompile=recompile, + prepare_for_timedomain = prepare_for_timedomain, + flux_codeword=flux_codeword, + nr_seeds=nr_seeds, + sim_cz_qubits=sim_cz_qubits, + compile_only=True, + pool=pool + ) + + # 4. Start the measurement and run the analysis for [None] + self.measure_two_qubit_randomized_benchmarking( + qubits=qubits, + MC=MC, + nr_cliffords=nr_cliffords, + interleaving_cliffords=[None], + recompile=recompile, # This of course needs to be False + prepare_for_timedomain = prepare_for_timedomain, + flux_codeword=flux_codeword, + nr_seeds=nr_seeds, + sim_cz_qubits=sim_cz_qubits, + rb_tasks=rb_tasks_start, + ) + + # 5. 
Wait for [104368] compilation to finish + cl_oql.wait_for_rb_tasks(rb_tasks_CZ) + + # # 6. Start (non-blocking) compilation for [100_000] + # if measure_idle_flux: + # rb_tasks_I = self.measure_two_qubit_randomized_benchmarking( + # qubits=qubits, + # MC=MC, + # nr_cliffords=nr_cliffords, + # interleaving_cliffords=[100_000], + # recompile=recompile, + # flux_codeword=flux_codeword, + # flux_allocated_duration_ns=flux_allocated_duration_ns, + # nr_seeds=nr_seeds, + # sim_cz_qubits=sim_cz_qubits, + # compile_only=True, + # pool=pool, + # ) + # elif start_next_round_compilation: + # # Optionally send to the `pool` the tasks of RB compilation to be + # # used on the next round of calling the iRB method + # rb_tasks_next = self.measure_two_qubit_randomized_benchmarking( + # qubits=qubits, + # MC=MC, + # nr_cliffords=nr_cliffords, + # interleaving_cliffords=[None], + # recompile=recompile, + # flux_codeword=flux_codeword, + # nr_seeds=nr_seeds, + # sim_cz_qubits=sim_cz_qubits, + # compile_only=True, + # pool=pool + # ) + + # 7. 
Start the measurement and run the analysis for [104368] + self.measure_two_qubit_randomized_benchmarking( + qubits=qubits, + MC=MC, + nr_cliffords=nr_cliffords, + interleaving_cliffords=[104368], + recompile=recompile, + prepare_for_timedomain = prepare_for_timedomain, + flux_codeword=flux_codeword, + nr_seeds=nr_seeds, + sim_cz_qubits=sim_cz_qubits, + rb_tasks=rb_tasks_CZ, + ) + a = ma2.InterleavedRandomizedBenchmarkingAnalysis( + label_base="icl[None]", + label_int="icl[104368]" + ) + # update qubit objects to record the attained CZ fidelity + if cardinal: + opposite_cardinal = {'NW':'SE', 'NE':'SW', 'SW':'NE', 'SE':'NW'} + self.find_instrument(qubits[0]).parameters[f'F_2QRB_{cardinal}'].set(1-a.proc_data_dict['quantities_of_interest']['eps_CZ_simple'].n) + self.find_instrument(qubits[1]).parameters[f'F_2QRB_{opposite_cardinal[cardinal]}'].set(1-a.proc_data_dict['quantities_of_interest']['eps_CZ_simple'].n) + + + # if measure_idle_flux: + # # 8. Wait for [100_000] compilation to finish + # cl_oql.wait_for_rb_tasks(rb_tasks_I) + + # # 8.a. Optionally send to the `pool` the tasks of RB compilation to be + # # used on the next round of calling the iRB method + # if start_next_round_compilation: + # rb_tasks_next = self.measure_two_qubit_randomized_benchmarking( + # qubits=qubits, + # MC=MC, + # nr_cliffords=nr_cliffords, + # interleaving_cliffords=[None], + # recompile=recompile, + # flux_codeword=flux_codeword, + # nr_seeds=nr_seeds, + # sim_cz_qubits=sim_cz_qubits, + # compile_only=True, + # pool=pool + # ) + + # # 9. 
Start the measurement and run the analysis for [100_000] + # self.measure_two_qubit_randomized_benchmarking( + # qubits=qubits, + # MC=MC, + # nr_cliffords=nr_cliffords, + # interleaving_cliffords=[100_000], + # recompile=False, + # flux_codeword=flux_codeword, + # flux_allocated_duration_ns=flux_allocated_duration_ns, + # nr_seeds=nr_seeds, + # sim_cz_qubits=sim_cz_qubits, + # rb_tasks=rb_tasks_I + # ) + # ma2.InterleavedRandomizedBenchmarkingAnalysis( + # label_base="icl[None]", + # label_int="icl[104368]", + # label_int_idle="icl[100000]" + # ) + + return rb_tasks_next + + if recompile or recompile == "as needed": + # This is an optimization that compiles the interleaved RB + # sequences for the next measurement while measuring the previous + # one + if pool is None: + # Using `with ...:` makes sure the other processes will be terminated + # `maxtasksperchild` avoid RAM issues + if not maxtasksperchild: + maxtasksperchild = cl_oql.maxtasksperchild + with multiprocessing.Pool(maxtasksperchild=maxtasksperchild) as pool: + run_parallel_iRB(recompile=recompile, + pool=pool, + rb_tasks_start=rb_tasks_start) + else: + # In this case the `pool` to execute the RB compilation tasks + # is provided, `rb_tasks_start` is expected to be as well + rb_tasks_next = run_parallel_iRB( + recompile=recompile, + pool=pool, + rb_tasks_start=rb_tasks_start, + start_next_round_compilation=start_next_round_compilation) + return rb_tasks_next + + else: + # recompile=False no need to parallelize compilation with measurement + # Perform two-qubit RB (no interleaved gate) + self.measure_two_qubit_randomized_benchmarking( + qubits=qubits, + MC=MC, + nr_cliffords=nr_cliffords, + interleaving_cliffords=[None], + recompile=recompile, + prepare_for_timedomain = prepare_for_timedomain, + flux_codeword=flux_codeword, + nr_seeds=nr_seeds, + sim_cz_qubits=sim_cz_qubits, + ) + + # Perform two-qubit RB with CZ interleaved + self.measure_two_qubit_randomized_benchmarking( + qubits=qubits, + MC=MC, + 
nr_cliffords=nr_cliffords, + interleaving_cliffords=[104368], + recompile=recompile, + prepare_for_timedomain = prepare_for_timedomain, + flux_codeword=flux_codeword, + nr_seeds=nr_seeds, + sim_cz_qubits=sim_cz_qubits, + ) + + a = ma2.InterleavedRandomizedBenchmarkingAnalysis( + label_base="icl[None]", + label_int="icl[104368]", + ) + + # update qubit objects to record the attained CZ fidelity + if cardinal: + opposite_cardinal = {'NW':'SE', 'NE':'SW', 'SW':'NE', 'SE':'NW'} + self.find_instrument(qubits[0]).parameters[f'F_2QRB_{cardinal}'].set(1-a.proc_data_dict['quantities_of_interest']['eps_CZ_simple'].n) + self.find_instrument(qubits[1]).parameters[f'F_2QRB_{opposite_cardinal[cardinal]}'].set(1-a.proc_data_dict['quantities_of_interest']['eps_CZ_simple'].n) + + if measure_idle_flux: + # Perform two-qubit iRB with idle identity of same duration as CZ + self.measure_two_qubit_randomized_benchmarking( + qubits=qubits, + MC=MC, + nr_cliffords=nr_cliffords, + interleaving_cliffords=[100_000], + recompile=recompile, + prepare_for_timedomain = prepare_for_timedomain, + flux_codeword=flux_codeword, + flux_allocated_duration_ns=flux_allocated_duration_ns, + nr_seeds=nr_seeds, + sim_cz_qubits=sim_cz_qubits, + ) + ma2.InterleavedRandomizedBenchmarkingAnalysis( + label_base="icl[None]", + label_int="icl[104368]", + label_int_idle="icl[100000]" + + ) + return True + + def measure_multi_qubit_simultaneous_randomized_benchmarking( + self, + qubits, + MC = None, + nr_cliffords=2 ** np.arange(11), + nr_seeds=100, + recompile: bool = "as needed", + cal_points: bool = True, + ro_acq_weight_type: str = "optimal IQ", + compile_only: bool = False, + pool=None, # a multiprocessing.Pool() + rb_tasks=None, # used after called with `compile_only=True + label_name=None, + prepare_for_timedomain=True + ): + """ + Performs simultaneous single qubit RB on multiple qubits. 
+ The data of this experiment should be compared to the results of single + qubit RB to reveal differences due to MW crosstalk and residual coupling + + Args: + qubits (list): + list of the qubit names on which to perform RB + + nr_cliffords (array): + lengths of the clifford sequences to perform + + nr_seeds (int): + number of different clifford sequences of each length + + recompile (bool, str {'as needed'}): + indicate whether to regenerate the sequences of clifford gates. + By default it checks whether the needed sequences were already + generated since the most recent change of OpenQL file + specified in self.cfg_openql_platform_fn + + cal_points (bool): + should calibration point (qubits in 0, 1 and 2 states) + be included in the measurement + """ + + # Settings that have to be preserved, change is required for + # 2-state readout and postprocessing + old_weight_type = self.ro_acq_weight_type() + old_digitized = self.ro_acq_digitized() + self.ro_acq_weight_type(ro_acq_weight_type) + self.ro_acq_digitized(False) + + if MC is None: + MC = self.instr_MC.get_instr() + MC.soft_avg(1) + + # The detector needs to be defined before setting back parameters + d = self.get_int_logging_detector(qubits=qubits) + # set back the settings + # self.ro_acq_weight_type(old_weight_type) + # self.ro_acq_digitized(old_digitized) + + for q in qubits: + q_instr = self.find_instrument(q) + mw_lutman = q_instr.instr_LutMan_MW.get_instr() + # mw_lutman.load_ef_rabi_pulses_to_AWG_lookuptable() + mw_lutman.set_default_lutmap() + + + if prepare_for_timedomain: + self.prepare_for_timedomain(qubits=qubits, bypass_flux=True) + + MC.soft_avg(1) + + def send_rb_tasks(pool_): + tasks_inputs = [] + for i in range(nr_seeds): + task_dict = dict( + qubits=[self.find_instrument(q).cfg_qubit_nr() for q in qubits], + nr_cliffords=nr_cliffords, + nr_seeds=1, + platf_cfg=self.cfg_openql_platform_fn(), + program_name="MultiQ_RB_s{}_ncl{}_{}".format( + i, + list(map(int, nr_cliffords)), + '_'.join(qubits) 
+ ), + interleaving_cliffords=[None], + simultaneous_single_qubit_RB=True, + cal_points=cal_points, + net_cliffords=[0, 3], # measures with and without inverting + f_state_cal_pts=True, + recompile=recompile, + ) + tasks_inputs.append(task_dict) + # pool.starmap_async can be used for positional arguments + # but we are using a wrapper + rb_tasks = pool_.map_async(cl_oql.parallel_friendly_rb, tasks_inputs) + return rb_tasks + + if compile_only: + assert pool is not None + rb_tasks = send_rb_tasks(pool) + return rb_tasks + + if rb_tasks is None: + # Using `with ...:` makes sure the other processes will be terminated + # avoid starting too mane processes, + # nr_processes = None will start as many as the PC can handle + nr_processes = None if recompile else 1 + with multiprocessing.Pool( + nr_processes, + maxtasksperchild=cl_oql.maxtasksperchild # avoid RAM issues + ) as pool: + rb_tasks = send_rb_tasks(pool) + cl_oql.wait_for_rb_tasks(rb_tasks) + + programs_filenames = rb_tasks.get() + + # to include calibration points + if cal_points: + sweep_points = np.append( + np.repeat(nr_cliffords, 2), + [nr_cliffords[-1] + 0.5] + + [nr_cliffords[-1] + 1.5] + + [nr_cliffords[-1] + 2.5], + ) + else: + sweep_points = np.repeat(nr_cliffords, 2) + + counter_param = ManualParameter("name_ctr", initial_value=0) + prepare_function_kwargs = { + "counter_param": counter_param, + "programs_filenames": programs_filenames, + "CC": self.instr_CC.get_instr(), + } + + # Using the first detector of the multi-detector as this is + # in charge of controlling the CC (see self.get_int_logging_detector) + d.set_prepare_function( + oqh.load_range_of_oql_programs_from_filenames, + prepare_function_kwargs, detectors="first" + ) + # d.nr_averages = 128 + + reps_per_seed = 4094 // len(sweep_points) + d.set_child_attr("nr_shots", reps_per_seed * len(sweep_points)) + + s = swf.None_Sweep(parameter_name="Number of Cliffords", unit="#") + + MC.set_sweep_function(s) + 
MC.set_sweep_points(np.tile(sweep_points, reps_per_seed * nr_seeds)) + + MC.set_detector_function(d) + + label="Multi_Qubit_sim_RB_{}seeds_recompile={}_".format(nr_seeds, recompile) + if label_name is None: + label += '_'.join(qubits) + else: + label += label_name + MC.run(label, exp_metadata={"bins": sweep_points}) + + cal_2Q = ["0"*len(qubits), "1"*len(qubits), "2"*len(qubits)] + Analysis = [] + for i in range(len(qubits)): + rates_I_quad_ch_idx = 2*i + cal_1Q = [state[rates_I_quad_ch_idx // 2] for state in cal_2Q] + a = ma2.RandomizedBenchmarking_SingleQubit_Analysis( + label=label, + rates_I_quad_ch_idx=rates_I_quad_ch_idx, + cal_pnts_in_dset=cal_1Q + ) + Analysis.append(a) + + return Analysis + + def measure_two_qubit_simultaneous_randomized_benchmarking( + self, + qubits, + MC= None, + nr_cliffords=2 ** np.arange(11), + nr_seeds=100, + interleaving_cliffords=[None], + label="TwoQubit_sim_RB_{}seeds_recompile={}_{}_{}", + recompile: bool = "as needed", + cal_points: bool = True, + ro_acq_weight_type: str = "optimal IQ", + compile_only: bool = False, + pool=None, # a multiprocessing.Pool() + rb_tasks=None # used after called with `compile_only=True` + ): + """ + Performs simultaneous single qubit RB on two qubits. 
+ The data of this experiment should be compared to the results of single + qubit RB to reveal differences due to crosstalk and residual coupling + + Args: + qubits (list): + pair of the qubit names on which to perform RB + + nr_cliffords (array): + lengths of the clifford sequences to perform + + nr_seeds (int): + number of different clifford sequences of each length + + interleaving_cliffords (list): + list of integers (or None) which specifies which cliffords + to interleave the sequence with (for interleaved RB) + For indices of Clifford group elements go to + two_qubit_clifford_group.py + + label (str): + string for formatting the measurement name + + recompile (bool, str {'as needed'}): + indicate whether to regenerate the sequences of clifford gates. + By default it checks whether the needed sequences were already + generated since the most recent change of OpenQL file + specified in self.cfg_openql_platform_fn + + cal_points (bool): + should calibration point (qubits in 0, 1 and 2 states) + be included in the measurement + """ + + # Settings that have to be preserved, change is required for + # 2-state readout and postprocessing + old_weight_type = self.ro_acq_weight_type() + old_digitized = self.ro_acq_digitized() + self.ro_acq_weight_type(ro_acq_weight_type) + self.ro_acq_digitized(False) + + for q in qubits: + q_instr = self.find_instrument(q) + mw_lutman = q_instr.instr_LutMan_MW.get_instr() + mw_lutman.set_default_lutmap() + + self.prepare_for_timedomain(qubits=qubits) + if MC is None: + MC = self.instr_MC.get_instr() + MC.soft_avg(1) + + # The detector needs to be defined before setting back parameters + d = self.get_int_logging_detector(qubits=qubits) + # set back the settings + self.ro_acq_weight_type(old_weight_type) + self.ro_acq_digitized(old_digitized) + + def send_rb_tasks(pool_): + tasks_inputs = [] + for i in range(nr_seeds): + task_dict = dict( + qubits=[self.find_instrument(q).cfg_qubit_nr() for q in qubits], + nr_cliffords=nr_cliffords, + 
nr_seeds=1, + platf_cfg=self.cfg_openql_platform_fn(), + program_name="TwoQ_Sim_RB_int_cl{}_s{}_ncl{}_{}_{}_double".format( + i, + list(map(int, nr_cliffords)), + interleaving_cliffords, + qubits[0], + qubits[1], + ), + interleaving_cliffords=interleaving_cliffords, + simultaneous_single_qubit_RB=True, + cal_points=cal_points, + net_cliffords=[0, 3], # measures with and without inverting + f_state_cal_pts=True, + recompile=recompile, + ) + tasks_inputs.append(task_dict) + # pool.starmap_async can be used for positional arguments + # but we are using a wrapper + rb_tasks = pool_.map_async(cl_oql.parallel_friendly_rb, tasks_inputs) + + return rb_tasks + + if compile_only: + assert pool is not None + rb_tasks = send_rb_tasks(pool) + return rb_tasks + + if rb_tasks is None: + # Using `with ...:` makes sure the other processes will be terminated + # avoid starting too mane processes, + # nr_processes = None will start as many as the PC can handle + nr_processes = None if recompile else 1 + with multiprocessing.Pool( + nr_processes, + maxtasksperchild=cl_oql.maxtasksperchild # avoid RAM issues + ) as pool: + rb_tasks = send_rb_tasks(pool) + cl_oql.wait_for_rb_tasks(rb_tasks) + + programs_filenames = rb_tasks.get() + + # to include calibration points + if cal_points: + sweep_points = np.append( + np.repeat(nr_cliffords, 2), + [nr_cliffords[-1] + 0.5] * 2 + + [nr_cliffords[-1] + 1.5] * 2 + + [nr_cliffords[-1] + 2.5] * 3, + ) + else: + sweep_points = np.repeat(nr_cliffords, 2) + + counter_param = ManualParameter("name_ctr", initial_value=0) + prepare_function_kwargs = { + "counter_param": counter_param, + "programs_filenames": programs_filenames, + "CC": self.instr_CC.get_instr(), + } + + # Using the first detector of the multi-detector as this is + # in charge of controlling the CC (see self.get_int_logging_detector) + d.set_prepare_function( + oqh.load_range_of_oql_programs_from_filenames, + prepare_function_kwargs, detectors="first" + ) + # d.nr_averages = 128 + + 
reps_per_seed = 4094 // len(sweep_points) + d.set_child_attr("nr_shots", reps_per_seed * len(sweep_points)) + + s = swf.None_Sweep(parameter_name="Number of Cliffords", unit="#") + + MC.set_sweep_function(s) + MC.set_sweep_points(np.tile(sweep_points, reps_per_seed * nr_seeds)) + + MC.set_detector_function(d) + label = label.format(nr_seeds, recompile, qubits[0], qubits[1]) + MC.run(label, exp_metadata={"bins": sweep_points}) + + # N.B. if interleaving cliffords are used, this won't work + # [2020-07-11 Victor] not sure if NB still holds + + cal_2Q = ["00", "01", "10", "11", "02", "20", "22"] + + rates_I_quad_ch_idx = 0 + cal_1Q = [state[rates_I_quad_ch_idx // 2] for state in cal_2Q] + a_q0 = ma2.RandomizedBenchmarking_SingleQubit_Analysis( + label=label, + rates_I_quad_ch_idx=rates_I_quad_ch_idx, + cal_pnts_in_dset=cal_1Q + ) + rates_I_quad_ch_idx = 2 + cal_1Q = [state[rates_I_quad_ch_idx // 2] for state in cal_2Q] + a_q1 = ma2.RandomizedBenchmarking_SingleQubit_Analysis( + label=label, + rates_I_quad_ch_idx=rates_I_quad_ch_idx, + cal_pnts_in_dset=cal_1Q + ) + + return a_q0, a_q1 + + def measure_gate_process_tomography( + self, + meas_qubit: str, + gate_qubits: list, + gate_name: str, + gate_duration_ns: int, + wait_after_gate_ns: int = 0, + nr_shots_per_case: int = 2**14, + prepare_for_timedomain: bool= True, + disable_metadata: bool = False, + ): + assert self.ro_acq_weight_type() != 'optimal', 'IQ readout required!' 
+ q_meas = self.find_instrument(meas_qubit) + # q_gate = self.find_instrument(gate_qubit) + q_gate_idx = [self.find_instrument(q).cfg_qubit_nr() for q in gate_qubits] + + MC = self.instr_MC.get_instr() + if prepare_for_timedomain: + if not all([q==meas_qubit for q in gate_qubits]): + self.prepare_for_timedomain(qubits=gate_qubits, prepare_for_readout=False) + self.prepare_for_timedomain(qubits=[meas_qubit]) + # Experiment + p = mqo.gate_process_tomograhpy( + meas_qubit_idx=q_meas.cfg_qubit_nr(), + gate_qubit_idx=q_gate_idx, + gate_name=gate_name, + gate_duration_ns=gate_duration_ns, + wait_after_gate_ns=wait_after_gate_ns, + platf_cfg=self.cfg_openql_platform_fn()) + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr(), + parameter_name='Shot', unit='#', + upload=True) + d = self.get_int_logging_detector() + nr_shots = (2*18+3)*nr_shots_per_case + if nr_shots < 2**20: + d.detectors[0].nr_shots = nr_shots + else: + _shots_per_run = ((2**20)//(2*18+3))*(2*18+3) + nr_shots = np.ceil(nr_shots/_shots_per_run)*_shots_per_run + print(f'Number of shots per case increased to {nr_shots/(2*18+3)}.') + d.detectors[0].nr_shots = _shots_per_run + MC.soft_avg(1) + MC.set_detector_function(d) + MC.set_sweep_function(s) + MC.set_sweep_points(np.arange(nr_shots)) + MC.live_plot_enabled(False) + try: + label = f'Gate_process_tomograhpy_gate_{gate_name}_{meas_qubit}' + MC.run(label+self.msmt_suffix, disable_snapshot_metadata=disable_metadata) + except: + print_exception() + # MC.live_plot_enabled(True) + # Analysis + ma2.tomoa.Gate_process_tomo_Analysis(qubit=q_meas.name, label='Gate_process') + + ######################################################## + # Calibration methods + ######################################################## + def create_dep_graph(self): + dags = [] + for qi in self.qubits(): + q_obj = self.find_instrument(qi) + if hasattr(q_obj, "_dag"): + dag = q_obj._dag + else: + dag = q_obj.create_dep_graph() + dags.append(dag) + + dag = 
nx.compose_all(dags) + + dag.add_node(self.name + " multiplexed readout") + dag.add_node(self.name + " resonator frequencies coarse") + dag.add_node("AWG8 MW-staircase") + dag.add_node("AWG8 Flux-staircase") + + # Timing of channels can be done independent of the qubits + # it is on a per frequency per feedline basis so not qubit specific + dag.add_node(self.name + " mw-ro timing") + dag.add_edge(self.name + " mw-ro timing", "AWG8 MW-staircase") + + dag.add_node(self.name + " mw-vsm timing") + dag.add_edge(self.name + " mw-vsm timing", self.name + " mw-ro timing") + + for edge_L, edge_R in self.qubit_edges(): + dag.add_node("Chevron {}-{}".format(edge_L, edge_R)) + dag.add_node("CZ {}-{}".format(edge_L, edge_R)) + + dag.add_edge( + "CZ {}-{}".format(edge_L, edge_R), + "Chevron {}-{}".format(edge_L, edge_R), + ) + dag.add_edge( + "CZ {}-{}".format(edge_L, edge_R), "{} cryo dist. corr.".format(edge_L) + ) + dag.add_edge( + "CZ {}-{}".format(edge_L, edge_R), "{} cryo dist. corr.".format(edge_R) + ) + + dag.add_edge( + "Chevron {}-{}".format(edge_L, edge_R), + "{} single qubit gates fine".format(edge_L), + ) + dag.add_edge( + "Chevron {}-{}".format(edge_L, edge_R), + "{} single qubit gates fine".format(edge_R), + ) + dag.add_edge("Chevron {}-{}".format(edge_L, edge_R), "AWG8 Flux-staircase") + dag.add_edge( + "Chevron {}-{}".format(edge_L, edge_R), + self.name + " multiplexed readout", + ) + + dag.add_node("{}-{} mw-flux timing".format(edge_L, edge_R)) + + dag.add_edge( + edge_L + " cryo dist. corr.", + "{}-{} mw-flux timing".format(edge_L, edge_R), + ) + dag.add_edge( + edge_R + " cryo dist. 
corr.", + "{}-{} mw-flux timing".format(edge_L, edge_R), + ) + + dag.add_edge( + "Chevron {}-{}".format(edge_L, edge_R), + "{}-{} mw-flux timing".format(edge_L, edge_R), + ) + dag.add_edge( + "{}-{} mw-flux timing".format(edge_L, edge_R), "AWG8 Flux-staircase" + ) + + dag.add_edge( + "{}-{} mw-flux timing".format(edge_L, edge_R), + self.name + " mw-ro timing", + ) + + for qubit in self.qubits(): + dag.add_edge(qubit + " ro pulse-acq window timing", "AWG8 MW-staircase") + + dag.add_edge(qubit + " room temp. dist. corr.", "AWG8 Flux-staircase") + dag.add_edge(self.name + " multiplexed readout", qubit + " optimal weights") + + dag.add_edge( + qubit + " resonator frequency", + self.name + " resonator frequencies coarse", + ) + dag.add_edge(qubit + " pulse amplitude coarse", "AWG8 MW-staircase") + + for qi in self.qubits(): + q_obj = self.find_instrument(qi) + # ensures all references are to the main dag + q_obj._dag = dag + + self._dag = dag + return dag + + def measure_multi_AllXY(self, qubits: list = None ,MC=None, + prepare_for_timedomain: bool = True, + disable_metadata: bool = False, + double_points =True,termination_opt=0.08): + + if qubits is None: + qubits = self.qubits() + if prepare_for_timedomain: + self.ro_acq_weight_type('optimal') + self.prepare_for_timedomain(qubits=qubits, bypass_flux=True) + + qubits_idx = [] + for q in qubits: + q_ob = self.find_instrument(q) + q_nr = q_ob.cfg_qubit_nr() + qubits_idx.append(q_nr) + + p = mqo.multi_qubit_AllXY(qubits_idx=qubits_idx, + platf_cfg=self.cfg_openql_platform_fn(), + double_points = double_points) + + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr()) + d = self.get_int_avg_det(qubits=qubits) + if MC is None: + MC = self.instr_MC.get_instr() + MC.set_sweep_function(s) + MC.set_sweep_points(np.arange(42)) + MC.set_detector_function(d) + MC.run('Multi_AllXY_'+'_'.join(qubits), + disable_snapshot_metadata = disable_metadata) + a = ma2.Multi_AllXY_Analysis() + + dev = 0 + for Q in qubits: + 
dev += a.proc_data_dict['deviation_{}'.format(Q)] + if dev > len(qubits)*termination_opt: + return False + else: + return True + + def measure_multi_rabi(self, qubits: list = None, prepare_for_timedomain=True ,MC=None, + amps=np.linspace(0,1,31),calibrate=True): + if qubits is None: + qubits = self.qubits() + if prepare_for_timedomain: + self.prepare_for_timedomain(qubits=qubits) + + qubits_idx = [] + for q in qubits: + qub = self.find_instrument(q) + qubits_idx.append(qub.cfg_qubit_nr()) + + + p = mqo.multi_qubit_rabi(qubits_idx = qubits_idx,platf_cfg = self.cfg_openql_platform_fn()) + + self.instr_CC.get_instr().eqasm_program(p.filename) + + s = swf.mw_lutman_amp_sweep(qubits = qubits,device=self) + + d = self.int_avg_det_single + + if MC is None: + MC = self.instr_MC.get_instr() + + MC.set_sweep_function(s) + MC.set_sweep_points(amps) + MC.set_detector_function(d) + label = 'Multi_qubit_rabi_'+'_'.join(qubits) + MC.run(name = label) + a = ma2.Multi_Rabi_Analysis(qubits = qubits, label = label) + if calibrate: + b = a.proc_data_dict + for q in qubits: + pi_amp = b['quantities_of_interest'][q]['pi_amp'] + qub = self.find_instrument(q) + qub.mw_channel_amp(pi_amp) + return True + + def measure_multi_ramsey(self, qubits: list = None, times = None, GBT = True, + artificial_periods: float = None, label=None, + MC=None, prepare_for_timedomain=True, + update_T2=True,update_frequency = False): + if MC is None: + MC = self.instr_MC.get_instr() + + if qubits is None: + qubits = self.qubits() + + if prepare_for_timedomain: + self.prepare_for_timedomain(qubits=qubits, bypass_flux=True) + + if artificial_periods is None: + artificial_periods = 5 + + if times is None: + t = True + times = [] + else: + t = False + + + qubits_idx = [] + for i,q in enumerate(qubits): + qub = self.find_instrument(q) + qubits_idx.append(qub.cfg_qubit_nr()) + stepsize = max((4*qub.T2_star()/61)//(abs(qub.cfg_cycle_time())) + *abs(qub.cfg_cycle_time()),40e-9) + if t is True: + set_time = 
np.arange(0,stepsize*64,stepsize) + times.append(set_time) + + artificial_detuning = artificial_periods/times[i][-1] + freq_qubit = qub.freq_qubit() + mw_mod = qub.mw_freq_mod.get() + freq_det = freq_qubit - mw_mod + artificial_detuning + qub.instr_LO_mw.get_instr().set('frequency', freq_det) + + points = len(times[0]) + + p = mqo.multi_qubit_ramsey(times = times,qubits_idx=qubits_idx, + platf_cfg=self.cfg_openql_platform_fn()) + + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr()) + + d = self.get_int_avg_det(qubits=qubits) + + MC.set_sweep_function(s) + MC.set_sweep_points(np.arange(points)) + MC.set_detector_function(d) + if label is None: + label = 'Multi_Ramsey_'+'_'.join(qubits) + MC.run(label) + + a = ma2.Multi_Ramsey_Analysis(qubits = qubits, times = times, artificial_detuning=artificial_detuning,label=label) + qoi = a.proc_data_dict['quantities_of_interest'] + for q in qubits: + qub = self.find_instrument(q) + if update_T2: + T2_star = qoi[q]['tau'] + qub.T2_star(T2_star) + if update_frequency: + new_freq = qoi[q]['freq_new'] + qub.freq_qubit(new_freq) + if GBT: + return True + else: + return a + + def calibrate_multi_frequency_fine(self,qubits: list = None,times = None, + artificial_periods: float = None, + MC=None, prepare_for_timedomain=True, + update_T2=False,update_frequency = True, + stepsize:float = None,termination_opt = 0, + steps=[1, 1, 3, 10, 30, 100, 300, 1000]): + if qubits is None: + qubits = self.qubits() + if artificial_periods is None: + artificial_periods = 2.5 + if stepsize is None: + stepsize = 20e-9 + for n in steps: + times = [] + for q in qubits: + qub = self.find_instrument(q) + time = np.arange(0,50*n*stepsize,n*stepsize) + times.append(time) + + label = 'Multi_Ramsey_{}_pulse_sep_'.format(n)+ '_'.join(qubits) + + a = self.measure_multi_ramsey(qubits = qubits, times =times, MC=MC, GBT=False, + artificial_periods = artificial_periods, label = label, + prepare_for_timedomain =prepare_for_timedomain, + 
update_frequency=False,update_T2 = update_T2) + for q in qubits: + + qub = self.find_instrument(q) + freq = a.proc_data_dict['quantities_of_interest'][q]['freq_new'] + T2 = a.proc_data_dict['quantities_of_interest'][q]['tau'] + fit_error = a.proc_data_dict['{}_fit_res'.format(q)].chisqr + + if (times[0][-1] < 2.*T2) and (update_frequency is True): + # If the last step is > T2* then the next will be for sure + qub.freq_qubit(freq) + + + + T2_max = max(a.proc_data_dict['quantities_of_interest'][q]['tau'] for q in qubits) + if times[0][-1] > 2.*T2_max: + # If the last step is > T2* then the next will be for sure + + print('Breaking of measurement because of T2*') + break + return True + + def measure_multi_T1(self,qubits: list = None, times = None, MC=None, + prepare_for_timedomain=True, analyze=True, + update=True): + + if MC is None: + MC = self.instr_MC.get_instr() + + if qubits is None: + qubits = self.qubits() + + if prepare_for_timedomain: + self.prepare_for_timedomain(qubits=qubits) + + + qubits_idx = [] + set_times = [] + for q in qubits: + qub = self.find_instrument(q) + qubits_idx.append(qub.cfg_qubit_nr()) + stepsize = max((4*qub.T1()/31)//(abs(qub.cfg_cycle_time())) + *abs(qub.cfg_cycle_time()),40e-9) + set_time = np.arange(0,stepsize*34,stepsize) + set_times.append(set_time) + + if times is None: + times = set_times + + points = len(times[0]) + + + + p = mqo.multi_qubit_T1(times = times,qubits_idx=qubits_idx, + platf_cfg=self.cfg_openql_platform_fn()) + + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr()) + + d = self.get_int_avg_det(qubits=qubits) + + MC.set_sweep_function(s) + MC.set_sweep_points(np.arange(points)) + MC.set_detector_function(d) + label = 'Multi_T1_'+'_'.join(qubits) + MC.run(label) + + if analyze: + a = ma2.Multi_T1_Analysis(qubits=qubits,times = times) + if update: + for q in qubits: + qub = self.find_instrument(q) + T1 = a.proc_data_dict['quantities_of_interest'][q]['tau'] + qub.T1(T1) + + return a + + def 
measure_multi_Echo(self,qubits: list=None, times = None, MC=None, + prepare_for_timedomain=True, analyze=True, + update=True): + if MC is None: + MC = self.instr_MC.get_instr() + + if qubits is None: + qubits = self.qubits() + + if prepare_for_timedomain: + self.prepare_for_timedomain(qubits=qubits) + + + qubits_idx = [] + set_times = [] + for q in qubits: + qub = self.find_instrument(q) + qubits_idx.append(qub.cfg_qubit_nr()) + stepsize = max((2*qub.T2_echo()/61)//(abs(qub.cfg_cycle_time())) + *abs(qub.cfg_cycle_time()),20e-9) + set_time = np.arange(0,stepsize*64,stepsize) + set_times.append(set_time) + + if times is None: + times = set_times + + points = len(times[0]) + + + p = mqo.multi_qubit_Echo(times = times,qubits_idx=qubits_idx, + platf_cfg=self.cfg_openql_platform_fn()) + + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr()) + + d = self.get_int_avg_det(qubits=qubits) + + MC.set_sweep_function(s) + MC.set_sweep_points(np.arange(points)) + MC.set_detector_function(d) + label = 'Multi_Echo_'+'_'.join(qubits) + MC.run(label) + if analyze: + a = ma2.Multi_Echo_Analysis(label = label, qubits = qubits,times = times) + if update: + qoi = a.proc_data_dict['quantities_of_interest'] + for q in qubits: + qub = self.find_instrument(q) + T2_echo = qoi[q]['tau'] + qub.T2_echo(T2_echo) + + return True + + def measure_multi_flipping(self, + qubits: list=None, + number_of_flips: int=None, + equator=True, + ax='x', + angle='180', + MC=None, + prepare_for_timedomain=True, + update=False, + scale_factor_based_on_line: bool = False + ): + # allow flipping only with pi/2 or pi, and x or y pulses + assert angle in ['90','180'] + assert ax.lower() in ['x', 'y'] + + if MC is None: + MC = self.instr_MC.get_instr() + + if qubits is None: + qubits = self.qubits() + + if prepare_for_timedomain: + self.prepare_for_timedomain(qubits=qubits, bypass_flux=True) + + if number_of_flips is None: + number_of_flips = 30 + nf = np.arange(0,(number_of_flips+4)*2,2) + + 
qubits_idx = [] + for q in qubits: + qub = self.find_instrument(q) + qubits_idx.append(qub.cfg_qubit_nr()) + + p = mqo.multi_qubit_flipping(number_of_flips = nf,qubits_idx=qubits_idx, + platf_cfg=self.cfg_openql_platform_fn(), + equator=equator,ax=ax, angle=angle) + + s = swf.OpenQL_Sweep(openql_program=p,unit = '#', + CCL=self.instr_CC.get_instr()) + + d = self.get_int_avg_det(qubits=qubits) + + MC.set_sweep_function(s) + MC.set_sweep_points(nf) + MC.set_detector_function(d) + label = 'Multi_flipping_'+'_'.join(qubits) + MC.run(label) + + a = ma2.Multi_Flipping_Analysis(qubits=qubits, label=label) + + if update: + for q in qubits: + # Same as in single-qubit flipping: + # Choose scale factor based on simple goodness-of-fit comparison, + # unless it is forced by `scale_factor_based_on_line` + # This method gives priority to the line fit: + # the cos fit will only be chosen if its chi^2 relative to the + # chi^2 of the line fit is at least 10% smaller + # cos_chisqr = a.proc_data_dict['quantities_of_interest'][q]['cos_fit'].chisqr + # line_chisqr = a.proc_data_dict['quantities_of_interest'][q]['line_fit'].chisqr + + # if scale_factor_based_on_line: + # scale_factor = a.proc_data_dict['quantities_of_interest'][q]['line_fit']['sf'] + # elif (line_chisqr - cos_chisqr)/line_chisqr > 0.1: + # scale_factor = a.proc_data_dict['quantities_of_interest'][q]['cos_fit']['sf'] + # else: + # scale_factor = a.proc_data_dict['quantities_of_interest'][q]['line_fit']['sf'] + + if scale_factor_based_on_line: + scale_factor = a.proc_data_dict['quantities_of_interest'][q]['line_fit']['sf'] + else: + # choose scale factor preferred by analysis (currently based on BIC measure) + scale_factor = a.proc_data_dict['{}_scale_factor'.format(q)] + + if abs(scale_factor-1) < 1e-3: + print(f'Qubit {q}: Pulse amplitude accurate within 0.1%. 
Amplitude not updated.') + return a + + qb = self.find_instrument(q) + if angle == '180': + if qb.cfg_with_vsm(): + amp_old = qb.mw_vsm_G_amp() + qb.mw_vsm_G_amp(scale_factor*amp_old) + else: + amp_old = qb.mw_channel_amp() + qb.mw_channel_amp(scale_factor*amp_old) + elif angle == '90': + amp_old = qb.mw_amp90_scale() + qb.mw_amp90_scale(scale_factor*amp_old) + + print('Qubit {}: Pulse amplitude for {}-{} pulse changed from {:.3f} to {:.3f}'.format( + q, ax, angle, amp_old, scale_factor*amp_old)) + + def measure_multi_motzoi(self,qubits: list = None, prepare_for_timedomain=True ,MC=None, + amps=None,calibrate=True): + if qubits is None: + qubits = self.qubits() + if prepare_for_timedomain: + self.prepare_for_timedomain(qubits=qubits) + if amps is None: + amps = np.linspace(-0.3,0.3,31) + + qubits_idx = [] + for q in qubits: + qub = self.find_instrument(q) + qubits_idx.append(qub.cfg_qubit_nr()) + + p = mqo.multi_qubit_motzoi(qubits_idx = qubits_idx,platf_cfg = self.cfg_openql_platform_fn()) + + self.instr_CC.get_instr().eqasm_program(p.filename) + + s = swf.motzoi_lutman_amp_sweep(qubits = qubits,device=self) + + d = self.get_int_avg_det(qubits = qubits,single_int_avg=True, + values_per_point=2, + values_per_point_suffex=['yX', 'xY'], + always_prepare=True) + + if MC is None: + MC = self.instr_MC.get_instr() + + MC.set_sweep_function(s) + MC.set_sweep_points(amps) + MC.set_detector_function(d) + label = 'Multi_Motzoi_'+'_'.join(qubits) + MC.run(name = label) + + a = ma2.Multi_Motzoi_Analysis(qubits=qubits, label = label) + if calibrate: + for q in qubits: + qub = self.find_instrument(q) + opt_motzoi = a.proc_data_dict['{}_intersect'.format(q)][0] + qub.mw_motzoi(opt_motzoi) + return True + + ####################################### + # Two qubit gate calibration functions + ####################################### + def measure_ramsey_tomo(self, + qubit_ramsey: list, + qubit_control: list, + excited_spectators: list = [], + nr_shots_per_case: int = 2**10, + 
flux_codeword: str = 'cz', + prepare_for_timedomain: bool = True, + MC=None): + ''' + Doc string + + ''' + + qubitR = [self.find_instrument(qr) for qr in qubit_ramsey] + qubitR_idxs = [qr.cfg_qubit_nr() for qr in qubitR] + + qubitC = [self.find_instrument(qc) for qc in qubit_control] + qubitC_idxs = [qc.cfg_qubit_nr() for qc in qubitC] + + # Get indices for spectator qubits + qubitS = [self.find_instrument(q) for q in excited_spectators] + qubitS_idxs = [q.cfg_qubit_nr() for q in qubitS] + + # Assert we have IQ readout + assert self.ro_acq_weight_type() == 'optimal IQ', 'device not in "optimal IQ" mode' + assert self.ro_acq_digitized() == False, 'RO should not be digitized' + + for qr in qubitR: + mw_lutman = qr.instr_LutMan_MW.get_instr() + mw_lutman.load_ef_rabi_pulses_to_AWG_lookuptable() + if prepare_for_timedomain: + self.prepare_for_timedomain(qubits=[*excited_spectators], prepare_for_readout=False) + self.prepare_for_timedomain(qubits=[*qubit_ramsey, *qubit_control]) + + + p = mqo.Ramsey_tomo(qR= qubitR_idxs, + qC= qubitC_idxs, + exc_specs= qubitS_idxs, + flux_codeword=flux_codeword, + platf_cfg=self.cfg_openql_platform_fn()) + + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr()) + + # d = self.get_int_log_det(qubits=[qubit_ramsey, qubit_control]) + d = self.get_int_logging_detector(qubits=[*qubit_ramsey, *qubit_control], + result_logging_mode='raw') + d.detectors[0].nr_shots = 4096 + try: + d.detectors[1].nr_shots = 4096 + except: + pass + try: + d.detectors[2].nr_shots = 4096 + except: + pass + + nr_shots = int(16*256*2**4) + if MC is None: + MC = self.instr_MC.get_instr() + MC.set_sweep_function(s) + MC.set_sweep_points(np.arange(nr_shots)) + MC.set_detector_function(d) + MC.run('Ramsey_tomo_R_{}_C_{}_S_{}'.format(qubit_ramsey, qubit_control, excited_spectators)) + # Analysis + a = ma2.tqg.Two_qubit_gate_tomo_Analysis(label='Ramsey', n_pairs=len(qubit_ramsey)) + + return a.qoi + + def measure_repeated_CZ_experiment(self, + qubit_pair: 
list, + rounds: int = 50, + nr_shots_per_case: int = 2**13, + flux_codeword: str = 'cz', + gate_time_ns: int = 60, + prepare_for_timedomain: bool = True, + analyze: bool = True, + disable_metadata: bool = False): + ''' + Function used to measure CZ leakage using a repeated measurment scheme: + xrounds + Q0 ---H---o---Meas + | + Q1 ---H---o---Meas + + Requires Qutrit readout. + Also measures 2 qutrit readout assignment to accurately estimate + leakage rates. + ''' + assert self.ro_acq_digitized() == False, 'Analog readout required' + assert 'IQ' in self.ro_acq_weight_type(), 'IQ readout is required!' + MC = self.instr_MC.get_instr() + # Configure lutmap + for qubit in qubit_pair: + qb = self.find_instrument(qubit) + mwl = qb.instr_LutMan_MW.get_instr() + mwl.set_default_lutmap() + self.prepare_for_timedomain(qubits = qubit_pair) + + # get qubit idx + Q_idx = [self.find_instrument(q).cfg_qubit_nr() for q in qubit_pair] + # Set UHF number of shots + _cycle = (2*rounds+9) + nr_shots = _cycle*nr_shots_per_case + # if heralded_init: + # nr_shots *= 2 + uhfqc_max_shots = 2**20 + if nr_shots < uhfqc_max_shots: + # all shots can be acquired in a single UHF run + shots_per_run = nr_shots + else: + # Number of UHF acquisition runs + nr_runs = ceil(nr_shots/uhfqc_max_shots) + shots_per_run = int((nr_shots/nr_runs)/_cycle)*_cycle + nr_shots = nr_runs*shots_per_run + # Compile sequence + p = mqo.repeated_CZ_experiment( + qubit_idxs=Q_idx, + rounds=rounds, + flux_codeword=flux_codeword, + gate_time_ns=gate_time_ns, + heralded_init=False, + platf_cfg=self.cfg_openql_platform_fn()) + + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr(), + parameter_name='Shot', unit='#', + upload=True) + MC.soft_avg(1) + MC.set_sweep_function(s) + MC.set_sweep_points(np.arange(nr_shots)) + d = self.get_int_logging_detector(qubits=qubit_pair) + for det in d.detectors: + det.nr_shots = shots_per_run + MC.set_detector_function(d) + MC.live_plot_enabled(False) + label = 
f'Repeated_CZ_experiment_qubit_pair_{"_".join(qubit_pair)}' + MC.run(label+self.msmt_suffix, disable_snapshot_metadata=disable_metadata) + MC.live_plot_enabled(True) + # Analysis + if analyze: + a = ma2.tqg.Repeated_CZ_experiment_Analysis(rounds=rounds, label=label) + + def measure_vcz_A_tmid_landscape( + self, + Q0, + Q1, + T_mids, + A_ranges, + A_points: int, + Q_parks: list = None, + Tp : float = None, + flux_codeword: str = 'cz', + flux_pulse_duration: float = 60e-9, + prepare_for_timedomain: bool = True, + disable_metadata: bool = False): + """ + Perform 2D sweep of amplitude and wave parameter while measuring + conditional phase and missing fraction via the "conditional + oscillation" experiment. + + Q0 : High frequency qubit(s). Can be given as single qubit or list. + Q1 : Low frequency qubit(s). Can be given as single qubit or list. + T_mids : list of vcz "T_mid" values to sweep. + A_ranges : list of tuples containing ranges of amplitude sweep. + A_points : Number of points to sweep for amplitude range. + Q_parks : list of qubits parked during operation. 
+ """ + if isinstance(Q0, str): + Q0 = [Q0] + if isinstance(Q1, str): + Q1 = [Q1] + assert len(Q0) == len(Q1) + + MC = self.instr_MC.get_instr() + nested_MC = self.instr_nested_MC.get_instr() + # get gate directions + directions = [get_gate_directions(q0, q1) for q0, q1 in zip(Q0, Q1)] + Flux_lm_0 = [self.find_instrument(q0).instr_LutMan_Flux.get_instr() for q0 in Q0] + Flux_lm_1 = [self.find_instrument(q1).instr_LutMan_Flux.get_instr() for q1 in Q1] + Flux_lms_park = [self.find_instrument(q).instr_LutMan_Flux.get_instr() for q in Q_parks] + # Prepare for time domain + if prepare_for_timedomain: + self.prepare_for_timedomain( + qubits=np.array([[Q0[i],Q1[i]] for i in range(len(Q0))]).flatten(), + bypass_flux=True) + for i, lm in enumerate(Flux_lm_0): + print(f'Setting {Q0[i]} vcz_amp_sq_{directions[i][0]} to 1') + print(f'Setting {Q0[i]} vcz_amp_fine_{directions[i][0]} to 0.5') + print(f'Setting {Q0[i]} vcz_amp_dac_at_11_02_{directions[i][0]} to 0.5') + lm.set(f'vcz_amp_sq_{directions[i][0]}', 1) + lm.set(f'vcz_amp_fine_{directions[i][0]}', .5) + lm.set(f'vcz_amp_dac_at_11_02_{directions[i][0]}', .5) + for i, lm in enumerate(Flux_lm_1): + print(f'Setting {Q1[i]} vcz_amp_dac_at_11_02_{directions[i][1]} to 0') + lm.set(f'vcz_amp_dac_at_11_02_{directions[i][1]}', 0) + # Look for Tp values + if Tp: + if isinstance(Tp, str): + Tp = [Tp] + else: + Tp = [lm.get(f'vcz_time_single_sq_{directions[i][0]}')*2 for i, lm in enumerate(Flux_lm_0)] + assert len(Q0) == len(Tp) + ####################### + # Load phase pulses + ####################### + for i, q in enumerate(Q0): + # only on the CZ qubits we add the ef pulses + mw_lutman = self.find_instrument(q).instr_LutMan_MW.get_instr() + lm = mw_lutman.LutMap() + # we hardcode the X on the ef transition to CW 31 here. 
+ lm[27] = {'name': 'rXm180', 'phi': 0, 'theta': -180, 'type': 'ge'} + lm[31] = {"name": "rX12", "theta": 180, "phi": 0, "type": "ef"} + # load_phase_pulses will also upload other waveforms + mw_lutman.load_phase_pulses_to_AWG_lookuptable() + # Wrapper function for conditional oscillation detector function. + def wrapper(Q0, Q1, + prepare_for_timedomain, + downsample_swp_points, + extract_only, + disable_metadata): + a = self.measure_conditional_oscillation_multi( + pairs=[[Q0[i], Q1[i]] for i in range(len(Q0))], + parked_qbs=Q_parks, + flux_codeword=flux_codeword, + prepare_for_timedomain=prepare_for_timedomain, + downsample_swp_points=downsample_swp_points, + extract_only=extract_only, + disable_metadata=disable_metadata, + verbose=False) + cp = { f'phi_cond_{i+1}' : a[f'pair_{i+1}_delta_phi_a']\ + for i in range(len(Q0)) } + mf = { f'missing_fraction_{i+1}' : a[f'pair_{i+1}_missing_frac_a']\ + for i in range(len(Q0)) } + return { **cp, **mf} + + d = det.Function_Detector( + wrapper, + msmt_kw={'Q0' : Q0, 'Q1' : Q1, + 'prepare_for_timedomain' : False, + 'downsample_swp_points': 3, + 'extract_only': True, + 'disable_metadata': True}, + result_keys=list(np.array([[f'phi_cond_{i+1}', f'missing_fraction_{i+1}']\ + for i in range(len(Q0))]).flatten()), + value_names=list(np.array([[f'conditional_phase_{i+1}', f'missing_fraction_{i+1}']\ + for i in range(len(Q0))]).flatten()), + value_units=list(np.array([['deg', '%']\ + for i in range(len(Q0))]).flatten())) + nested_MC.set_detector_function(d) + + swf1 = swf.multi_sweep_function_ranges( + sweep_functions=[Flux_lm_0[i].cfg_awg_channel_amplitude\ + for i in range(len(Q0))], + sweep_ranges= A_ranges, + n_points=A_points) + swf2 = swf.flux_t_middle_sweep( + fl_lm_tm = list(np.array([[Flux_lm_0[i], Flux_lm_1[i] ]\ + for i in range(len(Q0))]).flatten()), + fl_lm_park = Flux_lms_park, + which_gate = list(np.array(directions).flatten()), + t_pulse = Tp, + duration = flux_pulse_duration) + nested_MC.set_sweep_function(swf1) + 
nested_MC.set_sweep_points(np.arange(A_points)) + nested_MC.set_sweep_function_2D(swf2) + nested_MC.set_sweep_points_2D(T_mids) + MC.live_plot_enabled(False) + nested_MC.run(f'VCZ_Amp_vs_Tmid_{Q0}_{Q1}_{Q_parks}', + mode='2D', disable_snapshot_metadata=disable_metadata) + # MC.live_plot_enabled(True) + ma2.tqg.VCZ_tmid_Analysis(Q0=Q0, Q1=Q1, + A_ranges=A_ranges, + label='VCZ_Amp_vs_Tmid') + + def measure_vcz_A_B_landscape( + self, + Q0, Q1, + A_ranges, + A_points: int, + B_amps: list, + Q_parks: list = None, + update_flux_params: bool = False, + flux_codeword: str = 'cz', + prepare_for_timedomain: bool = True, + disable_metadata: bool = False): + """ + Perform 2D sweep of amplitude and wave parameter while measuring + conditional phase and missing fraction via the "conditional + oscillation" experiment. + + Q0 : High frequency qubit(s). Can be given as single qubit or list. + Q1 : Low frequency qubit(s). Can be given as single qubit or list. + T_mids : list of vcz "T_mid" values to sweep. + A_ranges : list of tuples containing ranges of amplitude sweep. + A_points : Number of points to sweep for amplitude range. + Q_parks : list of qubits parked during operation. 
+ """ + if isinstance(Q0, str): + Q0 = [Q0] + if isinstance(Q1, str): + Q1 = [Q1] + assert len(Q0) == len(Q1) + MC = self.instr_MC.get_instr() + nested_MC = self.instr_nested_MC.get_instr() + # get gate directions + directions = [get_gate_directions(q0, q1) for q0, q1 in zip(Q0, Q1)] + Flux_lm_0 = [self.find_instrument(q0).instr_LutMan_Flux.get_instr() for q0 in Q0] + Flux_lm_1 = [self.find_instrument(q1).instr_LutMan_Flux.get_instr() for q1 in Q1] + Flux_lms_park = [self.find_instrument(q).instr_LutMan_Flux.get_instr() for q in Q_parks] + # Prepare for time domain + if prepare_for_timedomain: + # Time-domain preparation + self.prepare_for_timedomain( + qubits=np.array([[Q0[i],Q1[i]] for i in range(len(Q0))]).flatten(), + bypass_flux=True) + for i, lm in enumerate(Flux_lm_0): + print(f'Setting {Q0[i]} vcz_amp_sq_{directions[i][0]} to 1') + print(f'Setting {Q0[i]} vcz_amp_dac_at_11_02_{directions[i][0]} to 0.5') + lm.set(f'vcz_amp_sq_{directions[i][0]}', 1) + lm.set(f'vcz_amp_dac_at_11_02_{directions[i][0]}', .5) + for i, lm in enumerate(Flux_lm_1): + print(f'Setting {Q1[i]} vcz_amp_dac_at_11_02_{directions[i][1]} to 0') + lm.set(f'vcz_amp_dac_at_11_02_{directions[i][1]}', 0) + # Update two qubit gate parameters + if update_flux_params: + # List of current flux lutman amplitudes + Amps_11_02 = [{ d: lm.get(f'vcz_amp_dac_at_11_02_{d}')\ + for d in ['NW', 'NE', 'SW', 'SE']} for lm in Flux_lm_0] + # List of parking amplitudes + Amps_park = [ lm.get('park_amp') for lm in Flux_lm_0 ] + # List of current flux lutman channel gains + Old_gains = [ lm.get('cfg_awg_channel_amplitude') for lm in Flux_lm_0] + ########################### + # Load phase pulses + ########################### + for i, q in enumerate(Q0): + # only on the CZ qubits we add the ef pulses + mw_lutman = self.find_instrument(q).instr_LutMan_MW.get_instr() + lm = mw_lutman.LutMap() + # we hardcode the X on the ef transition to CW 31 here. 
+ lm[27] = {'name': 'rXm180', 'phi': 0, 'theta': -180, 'type': 'ge'} + lm[31] = {"name": "rX12", "theta": 180, "phi": 0, "type": "ef"} + # load_phase_pulses will also upload other waveforms + mw_lutman.load_phase_pulses_to_AWG_lookuptable() + # Wrapper function for conditional oscillation detector function. + def wrapper(Q0, Q1, + prepare_for_timedomain, + downsample_swp_points, + extract_only, + disable_metadata): + a = self.measure_conditional_oscillation_multi( + pairs=[[Q0[i], Q1[i]] for i in range(len(Q0))], + parked_qbs=Q_parks, + flux_codeword=flux_codeword, + prepare_for_timedomain=prepare_for_timedomain, + downsample_swp_points=downsample_swp_points, + extract_only=extract_only, + disable_metadata=disable_metadata, + verbose=False) + cp = { f'phi_cond_{i+1}' : a[f'pair_{i+1}_delta_phi_a']\ + for i in range(len(Q0)) } + mf = { f'missing_fraction_{i+1}' : a[f'pair_{i+1}_missing_frac_a']\ + for i in range(len(Q0)) } + return { **cp, **mf} + + d = det.Function_Detector( + wrapper, + msmt_kw={'Q0' : Q0, 'Q1' : Q1, + 'prepare_for_timedomain' : False, + 'downsample_swp_points': 3, + 'extract_only': True, + 'disable_metadata': True}, + result_keys=list(np.array([[f'phi_cond_{i+1}', f'missing_fraction_{i+1}']\ + for i in range(len(Q0))]).flatten()), + value_names=list(np.array([[f'conditional_phase_{i+1}', f'missing_fraction_{i+1}']\ + for i in range(len(Q0))]).flatten()), + value_units=list(np.array([['deg', '%']\ + for i in range(len(Q0))]).flatten())) + nested_MC.set_detector_function(d) + + swf1 = swf.multi_sweep_function_ranges( + sweep_functions=[Flux_lm_0[i].cfg_awg_channel_amplitude + for i in range(len(Q0))], + sweep_ranges= A_ranges, + n_points=A_points) + swfs = [swf.FLsweep(lm = lm, + par = lm.parameters[f'vcz_amp_fine_{directions[i][0]}'], + waveform_name = f'cz_{directions[i][0]}') + for i, lm in enumerate(Flux_lm_0) ] + swf2 = swf.multi_sweep_function(sweep_functions=swfs) + nested_MC.set_sweep_function(swf1) + 
nested_MC.set_sweep_points(np.arange(A_points)) + nested_MC.set_sweep_function_2D(swf2) + nested_MC.set_sweep_points_2D(B_amps) + + MC.live_plot_enabled(False) + nested_MC.run(f'VCZ_Amp_vs_B_{Q0}_{Q1}_{Q_parks}', + mode='2D', disable_snapshot_metadata=disable_metadata) + # MC.live_plot_enabled(True) + a = ma2.tqg.VCZ_B_Analysis(Q0=Q0, Q1=Q1, + A_ranges=A_ranges, + directions=directions, + label='VCZ_Amp_vs_B') + ################################### + # Update flux parameters + ################################### + if update_flux_params: + print('Updating flux lutman parameters:') + def _set_amps_11_02(amps, lm, verbose=True): + ''' + Helper function to set amplitudes in Flux_lutman + ''' + for d in amps.keys(): + lm.set(f'vcz_amp_dac_at_11_02_{d}', amps[d]) + if verbose: + print(f'Set {lm.name}.vcz_amp_dac_at_11_02_{d} to {amps[d]}') + # Update channel gains for each gate + Opt_gains = [ a.qoi[f'Optimal_amps_{q}'][0] for q in Q0 ] + Opt_Bvals = [ a.qoi[f'Optimal_amps_{q}'][1] for q in Q0 ] + + for i in range(len(Q0)): + # If new channel gain is higher than old gain then scale dac + # values accordingly: new_dac = old_dac*(old_gain/new_gain) + if Opt_gains[i] > Old_gains[i]: + Flux_lm_0[i].set('cfg_awg_channel_amplitude', Opt_gains[i]) + print(f'Set {Flux_lm_0[i].name}.cfg_awg_channel_amplitude to {Opt_gains[i]}') + for d in ['NW', 'NE', 'SW', 'SE']: + Amps_11_02[i][d] *= Old_gains[i]/Opt_gains[i] + Amps_11_02[i][directions[i][0]] = 0.5 + Amps_park[i] *= Old_gains[i]/Opt_gains[i] + # If new channel gain is lower than old gain, then choose + # dac value for measured gate based on old gain + else: + Flux_lm_0[i].set('cfg_awg_channel_amplitude', Old_gains[i]) + print(f'Set {Flux_lm_0[i].name}.cfg_awg_channel_amplitude to {Old_gains[i]}') + Amps_11_02[i][directions[i][0]] = 0.5*Opt_gains[i]/Old_gains[i] + # Set flux_lutman amplitudes + _set_amps_11_02(Amps_11_02[i], Flux_lm_0[i]) + Flux_lm_0[i].set(f'vcz_amp_fine_{directions[i][0]}', Opt_Bvals[i]) + 
Flux_lm_0[i].set(f'park_amp', Amps_park[i]) + return a.qoi + + def measure_unipolar_A_t_landscape( + self, + Q0, Q1, + A_ranges, + A_points: int, + times: list, + Q_parks: list = None, + update_flux_params: bool = False, + flux_codeword: str = 'sf_square', + prepare_for_timedomain: bool = True, + disable_metadata: bool = False): + """ + Perform 2D sweep of amplitude and wave parameter while measuring + conditional phase and missing fraction via the "conditional + oscillation" experiment. + + Q0 : High frequency qubit(s). Can be given as single qubit or list. + Q1 : Low frequency qubit(s). Can be given as single qubit or list. + T_mids : list of vcz "T_mid" values to sweep. + A_ranges : list of tuples containing ranges of amplitude sweep. + A_points : Number of points to sweep for amplitude range. + Q_parks : list of qubits parked during operation. + """ + if isinstance(Q0, str): + Q0 = [Q0] + if isinstance(Q1, str): + Q1 = [Q1] + assert len(Q0) == len(Q1) + MC = self.instr_MC.get_instr() + nested_MC = self.instr_nested_MC.get_instr() + # get gate directions + directions = [get_gate_directions(q0, q1) for q0, q1 in zip(Q0, Q1)] + Flux_lm_0 = [self.find_instrument(q0).instr_LutMan_Flux.get_instr() for q0 in Q0] + Flux_lm_1 = [self.find_instrument(q1).instr_LutMan_Flux.get_instr() for q1 in Q1] + Flux_lms_park = [self.find_instrument(q).instr_LutMan_Flux.get_instr() for q in Q_parks] + # Prepare for time domain + if prepare_for_timedomain: + # Time-domain preparation + self.prepare_for_timedomain( + qubits=np.array([[Q0[i],Q1[i]] for i in range(len(Q0))]).flatten(), + bypass_flux=True) + for i, lm in enumerate(Flux_lm_0): + print(f'Setting {Q0[i]} sq_amp to -.5') + lm.set(f'sq_amp', -0.5) + ########################### + # Load phase pulses + ########################### + for i, q in enumerate(Q0): + # only on the CZ qubits we add the ef pulses + mw_lutman = self.find_instrument(q).instr_LutMan_MW.get_instr() + lm = mw_lutman.LutMap() + # we hardcode the X on the ef 
transition to CW 31 here. + lm[27] = {'name': 'rXm180', 'phi': 0, 'theta': -180, 'type': 'ge'} + lm[31] = {"name": "rX12", "theta": 180, "phi": 0, "type": "ef"} + # load_phase_pulses will also upload other waveforms + mw_lutman.load_phase_pulses_to_AWG_lookuptable() + # Wrapper function for conditional oscillation detector function. + def wrapper(Q0, Q1, + prepare_for_timedomain, + downsample_swp_points, + extract_only, + disable_metadata): + a = self.measure_conditional_oscillation_multi( + pairs=[[Q0[i], Q1[i]] for i in range(len(Q0))], + parked_qbs=Q_parks, + flux_codeword=flux_codeword, + prepare_for_timedomain=prepare_for_timedomain, + downsample_swp_points=downsample_swp_points, + extract_only=extract_only, + disable_metadata=disable_metadata, + verbose=False) + cp = { f'phi_cond_{i+1}' : a[f'pair_{i+1}_delta_phi_a']\ + for i in range(len(Q0)) } + mf = { f'missing_fraction_{i+1}' : a[f'pair_{i+1}_missing_frac_a']\ + for i in range(len(Q0)) } + return { **cp, **mf} + + d = det.Function_Detector( + wrapper, + msmt_kw={'Q0' : Q0, 'Q1' : Q1, + 'prepare_for_timedomain' : False, + 'downsample_swp_points': 3, + 'extract_only': True, + 'disable_metadata': True}, + result_keys=list(np.array([[f'phi_cond_{i+1}', f'missing_fraction_{i+1}']\ + for i in range(len(Q0))]).flatten()), + value_names=list(np.array([[f'conditional_phase_{i+1}', f'missing_fraction_{i+1}']\ + for i in range(len(Q0))]).flatten()), + value_units=list(np.array([['deg', '%']\ + for i in range(len(Q0))]).flatten())) + nested_MC.set_detector_function(d) + + swf1 = swf.multi_sweep_function_ranges( + sweep_functions=[Flux_lm_0[i].cfg_awg_channel_amplitude + for i in range(len(Q0))], + sweep_ranges= A_ranges, + n_points=A_points) + swfs = [swf.FLsweep(lm = lm, + par = lm.parameters['sq_length'], + waveform_name = 'square') + for i, lm in enumerate(Flux_lm_0) ] + swf2 = swf.multi_sweep_function(sweep_functions=swfs) + nested_MC.set_sweep_function(swf1) + nested_MC.set_sweep_points(np.arange(A_points)) + 
        # (tail of the 2D unipolar amplitude-vs-time sweep; its `def` is above
        # this chunk) — runs the nested 2D sweep and the VCZ B analysis.
        nested_MC.set_sweep_function_2D(swf2)
        nested_MC.set_sweep_points_2D(times)

        MC.live_plot_enabled(False)
        nested_MC.run(f'Unipolar_Amp_vs_t_{Q0}_{Q1}_{Q_parks}',
                      mode='2D', disable_snapshot_metadata=disable_metadata)
        # MC.live_plot_enabled(True)
        a = ma2.tqg.VCZ_B_Analysis(Q0=Q0, Q1=Q1,
                                   A_ranges=A_ranges,
                                   directions=directions,
                                   label='Unipolar_Amp_vs_t')
        ###################################
        # Update flux parameters
        ###################################
        if update_flux_params:
            # NOTE(review): flux-parameter update not implemented yet.
            pass
        return a.qoi

    def measure_parity_check_ramsey(
        self,
        Q_target: list,
        Q_control: list,
        flux_cw_list: list,
        control_cases: list = None,
        Q_spectator: list = None,
        pc_repetitions: int = 1,
        downsample_angle_points: int = 1,
        prepare_for_timedomain: bool = True,
        disable_metadata: bool = False,
        extract_only: bool = False,
        analyze: bool = True,
        solve_for_phase_gate_model: bool = False,
        update_mw_phase: bool = False,
        mw_phase_param: str = 'vcz_virtual_q_ph_corr_step_1',
        wait_time_before_flux: int = 0,
        wait_time_after_flux: int = 0):
        """
        Perform conditional oscillation like experiment in the context of a
        parity check.

        Q_target : Ancilla qubit where parity is projected.
        Q_control : List of control qubits in parity check.
        Q_spectator : Similar to control qubit, but will be treated as
                      spectator in analysis.
        flux_cw_list : list of flux codewords to be played during the parity
                       check.
        Control_cases : list of different control qubit states. Defaults to all
                        possible combinations of states.

        Returns the analysis quantities-of-interest dict (phase-gate-model
        coefficients per target qubit, missing fractions per control qubit).
        """
        # assert len(Q_target) == 1
        assert self.ro_acq_weight_type().lower() == 'optimal'
        MC = self.instr_MC.get_instr()
        if Q_spectator:
            # NOTE(review): this mutates the caller's Q_control list in place.
            Q_control += Q_spectator
        if control_cases == None:
            # Default: all 2^n binary control states, e.g. '00', '01', ...
            control_cases = ['{:0{}b}'.format(i, len(Q_control))\
                             for i in range(2**len(Q_control))]
            solve_for_phase_gate_model = True
        else:
            for case in control_cases:
                assert len(case) == len(Q_control)

        qubit_list = Q_target + Q_control
        if prepare_for_timedomain:
            self.prepare_for_timedomain(qubits=qubit_list)
            for q in Q_target:
                # Upload phase-swept microwave pulses for the Ramsey-like scan.
                mw_lm = self.find_instrument(q).instr_LutMan_MW.get_instr()
                mw_lm.set_default_lutmap()
                mw_lm.load_phase_pulses_to_AWG_lookuptable()
        Q_target_idx = [self.find_instrument(q).cfg_qubit_nr() for q in Q_target]
        Q_control_idx = [self.find_instrument(q).cfg_qubit_nr() for q in Q_control]
        # These are hardcoded angles in the mw_lutman for the AWG8
        # only x2 and x3 downsample_swp_points available
        angles = np.arange(0, 341, 20 * downsample_angle_points)
        p = mqo.parity_check_ramsey(
            Q_idxs_target = Q_target_idx,
            Q_idxs_control = Q_control_idx,
            control_cases = control_cases,
            flux_cw_list = flux_cw_list,
            platf_cfg = self.cfg_openql_platform_fn(),
            angles = angles,
            nr_spectators = len(Q_spectator) if Q_spectator else 0,
            pc_repetitions=pc_repetitions,
            wait_time_before_flux = wait_time_before_flux,
            wait_time_after_flux = wait_time_after_flux
            )
        s = swf.OpenQL_Sweep(
            openql_program=p,
            CCL=self.instr_CC.get_instr(),
            parameter_name="Cases",
            unit="a.u."
            )
        d = self.get_int_avg_det(qubits=qubit_list)
        MC.set_sweep_function(s)
        MC.set_sweep_points(p.sweep_points)
        MC.set_detector_function(d)
        label = f'Parity_check_ramsey_{"_".join(qubit_list)}'
        if pc_repetitions != 1:
            label += f'_x{pc_repetitions}'
        label += self.msmt_suffix
        MC.run(label, disable_snapshot_metadata=disable_metadata)
        if analyze:
            a = ma2.tqg.Parity_check_ramsey_analysis(
                label=label,
                Q_target = Q_target,
                Q_control = Q_control,
                Q_spectator = Q_spectator,
                control_cases = control_cases,
                angles = angles,
                solve_for_phase_gate_model = solve_for_phase_gate_model,
                extract_only = extract_only)
        if update_mw_phase:
            if type(mw_phase_param) is str:
                # Broadcast a single parameter name to all target qubits.
                mw_phase_param = [mw_phase_param for q in Q_target]
            for q, param in zip(Q_target, mw_phase_param):
                # update single qubit phase
                Q = self.find_instrument(q)
                mw_lm = Q.instr_LutMan_MW.get_instr()
                # Make sure mw phase parameter is valid
                assert param in mw_lm.parameters.keys()
                # Calculate new virtual phase
                phi0 = mw_lm.get(param)
                phi_new = list(a.qoi['Phase_model'][Q.name].values())[0]
                phi_new = phi_new / pc_repetitions # Divide by number of CZ repetitions
                phi = np.mod(phi0+phi_new, 360)
                mw_lm.set(param, phi)
                print(f'{Q.name}.{param} changed to {phi} deg.')
        # NOTE(review): if analyze is False this raises NameError on `a`.
        return a.qoi

    def calibrate_parity_check_phase(
        self,
        Q_ancilla: list,
        Q_control: list,
        Q_pair_target: list,
        flux_cw_list: list,
        B_amps: list = None,
        control_cases: list = None,
        pc_repetitions: int = 1,
        downsample_angle_points: int = 1,
        prepare_for_timedomain: bool = True,
        disable_metadata: bool = False,
        extract_only: bool = True,
        update_flux_param: bool = True,
        update_mw_phase: bool = True,
        mw_phase_param: str = 'vcz_virtual_q_ph_corr_step_1'):
        """
        Calibrate the phase of a gate in a parity-check by performing a sweep
        of the SNZ B parameter while measuring the parity check phase gate
        coefficients.

        Q_ancilla : Ancilla qubit of the parity check.
        Q_control : List of control qubits in parity check.
        Q_pair_target : list of two qubits involved in the two qubit gate. Must
                        be given in the order [high freq. qubit, low freq. qubit].
        flux_cw_list : list of flux codewords to be played during the parity
                       check.
        B_amps : List of B parameters to sweep through.
        Control_cases : list of different control qubit states. Defaults to all
                        possible combinations of states.
        """
        assert self.ro_acq_weight_type().lower() == 'optimal'
        assert len(Q_ancilla) == 1
        qubit_list = Q_ancilla + Q_control
        assert Q_pair_target[0] in qubit_list
        assert Q_pair_target[1] in qubit_list

        MC = self.instr_MC.get_instr()
        nested_MC = self.instr_nested_MC.get_instr()

        # get gate directions of two-qubit gate codewords
        directions = get_gate_directions(Q_pair_target[0],
                                         Q_pair_target[1])
        fl_lm = self.find_instrument(Q_pair_target[0]).instr_LutMan_Flux.get_instr()
        fl_par = f'vcz_amp_fine_{directions[0]}'
        B0 = fl_lm.get(fl_par)
        if B_amps is None:
            B_amps = np.linspace(-.1, .1, 3)+B0
        # Keep the sweep range inside the valid [0, 1] amplitude window.
        if np.min(B_amps) < 0:
            B_amps -= np.min(B_amps)
        if np.max(B_amps) > 1:
            B_amps -= np.max(B_amps)-1

        # Prepare for timedomain
        if prepare_for_timedomain:
            self.prepare_for_timedomain(qubits=qubit_list)
            for q in Q_ancilla:
                mw_lm = self.find_instrument(q).instr_LutMan_MW.get_instr()
                mw_lm.set_default_lutmap()
                mw_lm.load_phase_pulses_to_AWG_lookuptable()

        # Wrapper function for parity check ramsey detector function.
        def wrapper(Q_target, Q_control,
                    flux_cw_list,
                    downsample_angle_points,
                    extract_only):
            a = self.measure_parity_check_ramsey(
                Q_target = Q_target,
                Q_control = Q_control,
                flux_cw_list = flux_cw_list,
                control_cases = None,
                downsample_angle_points = downsample_angle_points,
                prepare_for_timedomain = False,
                pc_repetitions=pc_repetitions,
                solve_for_phase_gate_model = True,
                disable_metadata = True,
                extract_only = extract_only)
            # Phase-gate-model coefficients of the ancilla and missing
            # fractions of the controls become the detector values.
            pm = { f'Phase_model_{op}' : a['Phase_model'][Q_ancilla[0]][op]\
                  for op in a['Phase_model'][Q_ancilla[0]].keys()}
            mf = { f'missing_fraction_{q}' : a['Missing_fraction'][q]\
                  for q in Q_control }
            return { **pm, **mf}

        n = len(Q_control)
        # Pauli-operator labels ('II..', 'IZ..', ...) matching the binary cases.
        Operators = ['{:0{}b}'.format(i, n).replace('0','I').replace('1','Z')\
                     for i in range(2**n)]
        d = det.Function_Detector(
            wrapper,
            msmt_kw={'Q_target' : Q_ancilla,
                     'Q_control' : Q_control,
                     'flux_cw_list': flux_cw_list,
                     'downsample_angle_points': downsample_angle_points,
                     'extract_only': extract_only},
            result_keys=[f'Phase_model_{op}' for op in Operators]+\
                        [f'missing_fraction_{q}' for q in Q_control],
            value_names=[f'Phase_model_{op}' for op in Operators]+\
                        [f'missing_fraction_{q}' for q in Q_control],
            value_units=['deg' for op in Operators]+\
                        ['fraction' for q in Q_control])
        nested_MC.set_detector_function(d)
        # Set sweep function
        swf1 = swf.FLsweep(
            lm = fl_lm,
            par = fl_lm.parameters[fl_par],
            waveform_name = f'cz_{directions[0]}')
        nested_MC.set_sweep_function(swf1)
        nested_MC.set_sweep_points(B_amps)

        MC.live_plot_enabled(False)
        label = f'Parity_check_calibration_gate_{"_".join(Q_pair_target)}'
        nested_MC.run(label, disable_snapshot_metadata=disable_metadata)
        # MC.live_plot_enabled(True)

        a = ma2.tqg.Parity_check_calibration_analysis(
            Q_ancilla = Q_ancilla,
            Q_control = Q_control,
            Q_pair_target = Q_pair_target,
            B_amps = B_amps,
            label = label)
        if update_flux_param:
            try :
                if (a.qoi['Optimal_B']>0) and (a.qoi['Optimal_B']<1):
                    # update flux parameter
                    fl_lm.set(fl_par, a.qoi['Optimal_B'])
                elif a.qoi['Optimal_B']<0:
                    fl_lm.set(fl_par, 0)
                elif a.qoi['Optimal_B']>1:
                    fl_lm.set(fl_par, 1)
            except:
                # Restore the previous value before propagating the failure.
                fl_lm.set(fl_par, B0)
                raise ValueError(f'B amplitude {a.qoi["Optimal_B"]:.3f} not valid. '+\
                                 f'Resetting {fl_par} to {B0:.3f}.')
        else:
            fl_lm.set(fl_par, B0)
            print(f'Resetting {fl_par} to {B0:.3f}.')

        if update_mw_phase:
            # update single qubit phase
            Qa = self.find_instrument(Q_ancilla[0])
            mw_lm = Qa.instr_LutMan_MW.get_instr()
            # Make sure mw phase parameter is valid
            assert mw_phase_param in mw_lm.parameters.keys()
            # Calculate new virtual phase
            phi0 = mw_lm.get(mw_phase_param)
            phi = np.mod(phi0+a.qoi['Phase_offset'], 360)
            mw_lm.set(mw_phase_param, phi)

        return a.qoi

    def calibrate_park_frequency(
        self,
        qH: str,
        qL: str,
        qP: str,
        Park_distances: list = np.arange(300e6, 1000e6, 5e6),
        flux_cw: str = 'cz',
        relative_to_qH: bool = True,
        extract_only: bool = False,
        prepare_for_timedomain: bool = True,
        disable_metadata: bool = False):
        """
        Calibrate the parking amplitude of a spectator for a given two-qubit
        gate. Does this by sweeping the parking frequency while measuring the
        conditional phases and missing fraction of the three qubits involved.

        qH : High frequency qubit in two-qubit gate.
        qL : Low frequency qubit in two-qubit gate.
        qP : Parked qubit on which we'll sweep park frequency.
        flux_cw : flux codeword of two-qubit gate.
        Park_distances : List of Park sweep (frequency) distances to low
                         frequency qubit during the two-qubit gate.
        """
        assert self.ro_acq_weight_type() == 'optimal'
        # Get measurement control instances
        MC = self.instr_MC.get_instr()
        nested_MC = self.instr_nested_MC.get_instr()
        # setup up measurement
        dircts = get_gate_directions(qH, qL)
        Q_H = self.find_instrument(qH)
        Q_L = self.find_instrument(qL)
        Q_P = self.find_instrument(qP)
        flux_lm_H = Q_H.instr_LutMan_Flux.get_instr()
        flux_lm_L = Q_L.instr_LutMan_Flux.get_instr()
        flux_lm_P = Q_P.instr_LutMan_Flux.get_instr()
        # Prepare for timedomain
        if prepare_for_timedomain:
            self.prepare_for_timedomain(qubits=[qH, qL, qP])
            # Upload phase pulses on qH
            mw_lm_H = Q_H.instr_LutMan_MW.get_instr()
            mw_lm_H.set_default_lutmap()
            mw_lm_H.load_phase_pulses_to_AWG_lookuptable()

        # Wrapper function for parity check ramsey detector function.
        def wrapper():
            # downsampling factor (makes sweep faster!)
            downsample = 3
            self.measure_parity_check_ramsey(
                Q_target = [qH],
                Q_control = [qL],
                Q_spectator = [qP],
                flux_cw_list = [flux_cw],
                prepare_for_timedomain = False,
                downsample_angle_points = downsample,
                update_mw_phase=False,
                analyze=False,
                disable_metadata=True)
            # Analyze
            a = ma2.tqg.Parity_check_ramsey_analysis(
                Q_target = [qH],
                Q_control = [qL, qP],
                Q_spectator = [qP],
                control_cases = ['{:0{}b}'.format(i, 2) for i in range(4)],
                angles = np.arange(0, 341, 20*downsample),
                solve_for_phase_gate_model = True,
                extract_only = True)
            # Get residual ZZ phase
            # '_s' quantities are with the spectator (qP) excited.
            phi = a.proc_data_dict['Fit_res'][qH]['00'][0]
            phi_s = a.proc_data_dict['Fit_res'][qH]['01'][0]
            delta_phi = phi_s-phi
            # Wrap phases into (-180, 180].
            phi = np.mod(phi+180, 360)-180
            phi_s = np.mod(phi_s+180, 360)-180
            delta_phi = np.mod(delta_phi+180, 360)-180
            # Conditional phase difference
            phi_cond = a.proc_data_dict['Fit_res'][qH]['00'][0]-a.proc_data_dict['Fit_res'][qH]['10'][0]
            phi_cond_s = a.proc_data_dict['Fit_res'][qH]['01'][0]-a.proc_data_dict['Fit_res'][qH]['11'][0]
            delta_phi_cond = phi_cond_s-phi_cond
            phi_cond = np.mod(phi_cond, 360)
            phi_cond_s = np.mod(phi_cond_s, 360)
            delta_phi_cond = np.mod(delta_phi_cond+180, 360)-180
            # Missing fraction
            miss_frac = a.proc_data_dict['P_excited'][qL]['10']-a.proc_data_dict['P_excited'][qL]['00']
            miss_frac_s = a.proc_data_dict['P_excited'][qL]['11']-a.proc_data_dict['P_excited'][qL]['01']
            delta_miss_frac = miss_frac_s-miss_frac
            # result dictionary
            _r = {'phi': phi, 'phi_s': phi_s, 'delta_phi': delta_phi,
                  'phi_cond': phi_cond, 'phi_cond_s': phi_cond_s, 'delta_phi_cond': delta_phi_cond,
                  'miss_frac': miss_frac, 'miss_frac_s': miss_frac_s, 'delta_miss_frac': delta_miss_frac}
            return _r

        d = det.Function_Detector(
            wrapper,
            msmt_kw={},
            result_keys=['phi', 'phi_s', 'delta_phi',
                         'phi_cond', 'phi_cond_s', 'delta_phi_cond',
                         'miss_frac', 'miss_frac_s', 'delta_miss_frac'],
            value_names=['phi', 'phi_s', 'delta_phi',
                         'phi_cond', 'phi_cond_s', 'delta_phi_cond',
                         'miss_frac', 'miss_frac_s', 'delta_miss_frac'],
            value_units=['deg', 'deg', 'deg', 'deg', 'deg', 'deg',
                         'fraction', 'fraction', 'fraction'])
        nested_MC.set_detector_function(d)
        # Set sweep function
        swf1 = swf.FLsweep(
            lm = flux_lm_P,
            par = flux_lm_P.parameters['park_amp'],
            waveform_name = 'park')
        nested_MC.set_sweep_function(swf1)
        # Get parking amplitudes based on parking distances
        Park_detunings = []
        Park_amps = []
        for park_dist in Park_distances:
            # calculate detuning of qH during 2Q-gate
            det_qH = get_frequency_waveform(f'vcz_amp_dac_at_11_02_{dircts[0]}',
                                            flux_lm_H)
            det_qL = get_frequency_waveform(f'vcz_amp_dac_at_11_02_{dircts[1]}',
                                            flux_lm_L)
            # calculate required detuning of qP during 2Q-gate
            park_det = park_dist
            if relative_to_qH:
                park_freq = Q_H.freq_qubit()-det_qH-park_dist
                park_det = Q_P.freq_qubit()-park_freq
            Park_detunings.append(park_det)
            # Only park if the qubit is closer than 350 MHz
            amp_park = get_DAC_amp_frequency(park_det, flux_lm_P)
            Park_amps.append(amp_park)

        # If parking distance results in negative detuning, clip those values
        idx = np.where(np.array(Park_detunings)>0)[0]
        Park_amps = np.array(Park_amps)[idx]
        Park_distances = np.array(Park_distances)[idx]
        # set sweeping park amps
        nested_MC.set_sweep_points(Park_amps)
        # Measure!
        MC.live_plot_enabled(False)
        label = f'Park_frequency_calibration_gate_{qH}_{qL}_park_{qP}'
        try:
            nested_MC.run(label, disable_snapshot_metadata=disable_metadata)
        except:
            # Best-effort: log the failure and continue to analysis.
            print_exception()
            # NOTE(review): indentation ambiguous in source — this may belong
            # at method level rather than inside the except block. Confirm.
            self.msmt_suffix = '_device'
        # MC.live_plot_enabled(True)
        # Run analysis
        # if False:
        a = ma2.tqg.Park_frequency_sweep_analysis(
            label=label,
            qH=qH, qL=qL, qP=qP,
            Parking_distances=Park_distances,
            alpha_qH=Q_H.anharmonicity())

    def calibrate_parity_check_park_new(
        self,
        Q_neighbors: list,
        Q_ancilla: str,
        Q_park: str,
        flux_cw_list: list,
        Park_amps: list,
        downsample_angle_points: int = 1,
        prepare_for_timedomain: bool = True,
        extract_only: bool = False,
        update_park_amp: bool = True):
        """
        Sweep the parking amplitude of Q_park while measuring, for each
        neighboring (ramsey) qubit, the spectator phase coefficients and the
        missing fractions of the ancilla and parked qubit via the parity-check
        ramsey experiment.

        Q_neighbors : neighboring qubits used as ramsey targets.
        Q_ancilla : ancilla (control) qubit of the parity check.
        Q_park : qubit whose 'park_amp' flux parameter is swept.
        flux_cw_list : flux codewords played during the parity check.
        Park_amps : parking amplitudes to sweep.
        """
        qubit_list = Q_neighbors + [Q_ancilla] + [Q_park]

        MC = self.instr_MC.get_instr()
        nested_MC = self.instr_nested_MC.get_instr()

        fl_lm = self.find_instrument(Q_park).instr_LutMan_Flux.get_instr()
        fl_par = 'park_amp'
        # Remember original amplitude so it can be restored.
        P0 = fl_lm.get(fl_par)

        # Prepare for timedomain
        if prepare_for_timedomain:
            self.prepare_for_timedomain(qubits=qubit_list)
            for q in Q_neighbors:
                mw_lm = self.find_instrument(q).instr_LutMan_MW.get_instr()
                mw_lm.set_default_lutmap()
                mw_lm.load_phase_pulses_to_AWG_lookuptable()

        # Wrapper function for parity check ramsey detector function.
        def wrapper(Q_neighbors,
                    Q_park,
                    Q_ancilla,
                    flux_cw_list,
                    downsample_angle_points,
                    extract_only):
            a = self.measure_parity_check_ramsey(
                Q_target = Q_neighbors,
                Q_control = [Q_ancilla],
                Q_spectator = [Q_park],
                flux_cw_list = flux_cw_list,
                downsample_angle_points = downsample_angle_points,
                prepare_for_timedomain = False,
                disable_metadata = True,
                extract_only = extract_only)
            # 'IZ' term: phase picked up conditional on the parked qubit;
            # 'ZZ' term: joint ancilla-park conditional phase.
            Phase_coeff = {}
            for q in Q_neighbors:
                Phase_coeff['_'.join([q]+[Q_park])] = a['Phase_model'][q]['IZ']
                Phase_coeff['_'.join([q]+[Q_ancilla]+[Q_park])] = \
                    a['Phase_model'][q]['ZZ']
            missing_fraction = a['Missing_fraction']
            return {**Phase_coeff, **missing_fraction}

        keys = []
        for q in Q_neighbors:
            keys.append(f'{q}_{Q_park}')
            keys.append(f'{q}_{Q_ancilla}_{Q_park}')
        d = det.Function_Detector(
            wrapper,
            msmt_kw={'Q_neighbors' : Q_neighbors,
                     'Q_park' : Q_park,
                     'Q_ancilla' : Q_ancilla,
                     'flux_cw_list': flux_cw_list,
                     'downsample_angle_points': downsample_angle_points,
                     'extract_only': extract_only},
            result_keys=keys+[Q_ancilla]+[Q_park],
            value_names=[f'Phase_coeff_{k}' for k in keys]+\
                        [f'missing_fraction_{q}' for q in [Q_ancilla, Q_park]],
            value_units=['deg' for k in keys]+\
                        ['missing_fraction' for i in range(2)])
        nested_MC.set_detector_function(d)
        # Set sweep function
        swf1 = swf.FLsweep(
            lm = fl_lm,
            par = fl_lm.parameters[fl_par],
            waveform_name = 'park')
        nested_MC.set_sweep_function(swf1)
        nested_MC.set_sweep_points(Park_amps)

        # MC.live_plot_enabled(False)
        label = f'Parity_check_calibration_park_{Q_park}_{"_".join(Q_neighbors)}_{Q_ancilla}'
        nested_MC.run(label)
        # MC.live_plot_enabled(True)

        if update_park_amp:
            # NOTE(review): automatic update not implemented — the optimal
            # amplitude extraction below is still commented out.
            pass
            # P_opt = a.qoi['Amp_opt']
            # fl_lm.set(fl_par, P_opt)
            # print(f'Park amplitude of {Q_park_target} set to {P_opt}.')
        else:
            fl_lm.set(fl_par, P0)
            print(f'Park amplitude of {Q_park} reset to {P0}.')

    def calibrate_vcz_flux_offset(
        self,
        Q0: str, Q1: str,
        Offsets: list = None,
        Q_parks: list = None,
        update_params: bool = True,
        flux_codeword: str = 'cz',
        disable_metadata: bool = False):
        """
        Perform a sweep of flux offset of high freq. qubit while measuring
        conditional phase and missing fraction via the "conditional
        oscillation" experiment.

        Q0 : High frequency qubit. Can be given as single qubit or list.
        Q1 : Low frequency qubit. Can be given as single qubit or list.
        Offsets : DC flux-bias (current) offsets to sweep on Q0's flux channel.
                  Defaults to a window around the current fl_dc_I0 value.
        Q_parks : list of qubits parked during operation.
        """
        MC = self.instr_MC.get_instr()
        nested_MC = self.instr_nested_MC.get_instr()
        if Offsets == None:
            Q_inst = self.find_instrument(Q0)
            dc_off = Q_inst.fl_dc_I0()
            if 'D' in Q0:
                # When the Q0 is a data qubit, it can only be a high - mid
                # CZ gate type. When this happens we want higher range
                Offsets = np.linspace(-40e-6, 40e-6, 7)+dc_off
            else:
                # When the Q0 is not a data qubit, it can only be a mid - low
                # CZ gate type. When this happens we want lower range
                Offsets = np.linspace(-20e-6, 20e-6, 7)+dc_off
        # Time-domain preparation
        self.prepare_for_timedomain(
            qubits=[Q0, Q1],
            bypass_flux=False)
        ###########################
        # Load phase pulses
        ###########################
        # only on the CZ qubits we add the ef pulses
        mw_lutman = self.find_instrument(Q0).instr_LutMan_MW.get_instr()
        lm = mw_lutman.LutMap()
        # we hardcode the X on the ef transition to CW 31 here.
        lm[27] = {'name': 'rXm180', 'phi': 0, 'theta': -180, 'type': 'ge'}
        lm[31] = {"name": "rX12", "theta": 180, "phi": 0, "type": "ef"}
        # load_phase_pulses will also upload other waveforms
        mw_lutman.load_phase_pulses_to_AWG_lookuptable()

        # Wrapper function for conditional oscillation detector function.
        def wrapper(Q0, Q1,
                    prepare_for_timedomain,
                    downsample_swp_points,
                    extract_only,
                    disable_metadata):
            a = self.measure_conditional_oscillation_multi(
                pairs=[[Q0, Q1]],
                parked_qbs=Q_parks,
                flux_codeword=flux_codeword,
                prepare_for_timedomain=prepare_for_timedomain,
                downsample_swp_points=downsample_swp_points,
                extract_only=extract_only,
                disable_metadata=disable_metadata,
                verbose=False)
            cp = { f'phi_cond_{1}' : a[f'pair_{1}_delta_phi_a']}
            mf = { f'missing_fraction_{1}' : a[f'pair_{1}_missing_frac_a']}
            return { **cp, **mf}

        d = det.Function_Detector(
            wrapper,
            msmt_kw={'Q0' : Q0, 'Q1' : Q1,
                     'prepare_for_timedomain' : False,
                     'downsample_swp_points': 3,
                     'extract_only': True,
                     'disable_metadata': True},
            result_keys=[f'phi_cond_{1}', f'missing_fraction_{1}'],
            value_names=[f'conditional_phase_{1}', f'missing_fraction_{1}'],
            value_units=['deg', '%'])
        nested_MC.set_detector_function(d)
        Q_inst = self.find_instrument(Q0)
        # Sweep the DC flux-bias channel of Q0 directly.
        swf1 = Q_inst.instr_FluxCtrl.get_instr()[Q_inst.fl_dc_ch()]
        nested_MC.set_sweep_function(swf1)
        nested_MC.set_sweep_points(Offsets)
        try:
            MC.live_plot_enabled(False)
            nested_MC.run(f'VCZ_flux_offset_sweep_{Q0}_{Q1}_{Q_parks}', mode='1D',
                          disable_snapshot_metadata=disable_metadata)
            MC.live_plot_enabled(True)
            a = ma2.tqg.VCZ_flux_offset_sweep_Analysis(label='VCZ_flux_offset_sweep')
        except:
            # On failure, restore the previous flux offset.
            print_exception()
            print(f'Resetting flux offset of {Q0}...')
            swf1.set(Q_inst.fl_dc_I0())
        ################################
        # Update (or reset) params
        ################################
        if update_params:
            # NOTE(review): if the run above raised, `a` is undefined here.
            swf1(a.qoi[f'offset_opt'])
            Q_inst.fl_dc_I0(a.qoi[f'offset_opt'])
            print(f'Updated {swf1.name} to {a.qoi[f"offset_opt"]*1e3:.3f}mA')

    def calibrate_vcz_asymmetry(
        self,
        Q0, Q1,
        Asymmetries: list = np.linspace(-.005, .005, 7),
        Q_parks: list = None,
        prepare_for_timedomain = True,
        update_params: bool = True,
        flux_codeword: str = 'cz',
        disable_metadata: bool = False):
        """
        Perform a sweep of vcz pulse asymmetry while measuring
        conditional phase and missing fraction via the "conditional
        oscillation" experiment.

        Q0 : High frequency qubit(s). Can be given as single qubit or list.
        Q1 : Low frequency qubit(s). Can be given as single qubit or list.
        Asymmetries : asymmetry values of the vcz pulse to sweep.
        Q_parks : list of qubits parked during operation.
        """
        if isinstance(Q0, str):
            Q0 = [Q0]
        if isinstance(Q1, str):
            Q1 = [Q1]
        assert len(Q0) == len(Q1)
        MC = self.instr_MC.get_instr()
        nested_MC = self.instr_nested_MC.get_instr()
        # get gate directions
        directions = [get_gate_directions(q0, q1) for q0, q1 in zip(Q0, Q1)]
        Flux_lm_0 = [self.find_instrument(q0).instr_LutMan_Flux.get_instr() for q0 in Q0]
        Flux_lm_1 = [self.find_instrument(q1).instr_LutMan_Flux.get_instr() for q1 in Q1]
        Flux_lms_park = [self.find_instrument(q).instr_LutMan_Flux.get_instr() for q in Q_parks]
        # Make sure asymmetric pulses are enabled
        for i, flux_lm in enumerate(Flux_lm_0):
            param = flux_lm.parameters[f'vcz_use_asymmetric_amp_{directions[i][0]}']
            assert param() == True , 'Asymmetric pulses must be enabled.'
        if prepare_for_timedomain:
            # Time-domain preparation
            self.prepare_for_timedomain(
                qubits=np.array([[Q0[i],Q1[i]] for i in range(len(Q0))]).flatten(),
                bypass_flux=True)
            ###########################
            # Load phase pulses
            ###########################
            for i, q in enumerate(Q0):
                # only on the CZ qubits we add the ef pulses
                mw_lutman = self.find_instrument(q).instr_LutMan_MW.get_instr()
                lm = mw_lutman.LutMap()
                # we hardcode the X on the ef transition to CW 31 here.
                lm[27] = {'name': 'rXm180', 'phi': 0, 'theta': -180, 'type': 'ge'}
                lm[31] = {"name": "rX12", "theta": 180, "phi": 0, "type": "ef"}
                # load_phase_pulses will also upload other waveforms
                mw_lutman.load_phase_pulses_to_AWG_lookuptable()

        # Wrapper function for conditional oscillation detector function.
        def wrapper(Q0, Q1,
                    prepare_for_timedomain,
                    downsample_swp_points,
                    extract_only,
                    disable_metadata):
            a = self.measure_conditional_oscillation_multi(
                pairs=[[Q0[i], Q1[i]] for i in range(len(Q0))],
                parked_qbs=Q_parks,
                flux_codeword=flux_codeword,
                prepare_for_timedomain=prepare_for_timedomain,
                downsample_swp_points=downsample_swp_points,
                extract_only=extract_only,
                disable_metadata=disable_metadata,
                verbose=False)
            cp = { f'phi_cond_{i+1}' : a[f'pair_{i+1}_delta_phi_a']\
                  for i in range(len(Q0)) }
            mf = { f'missing_fraction_{i+1}' : a[f'pair_{i+1}_missing_frac_a']\
                  for i in range(len(Q0)) }
            return { **cp, **mf}

        d = det.Function_Detector(
            wrapper,
            msmt_kw={'Q0' : Q0, 'Q1' : Q1,
                     'prepare_for_timedomain' : False,
                     'downsample_swp_points': 3,
                     'extract_only': True,
                     'disable_metadata': True},
            result_keys=list(np.array([[f'phi_cond_{i+1}', f'missing_fraction_{i+1}']\
                             for i in range(len(Q0))]).flatten()),
            value_names=list(np.array([[f'conditional_phase_{i+1}', f'missing_fraction_{i+1}']\
                             for i in range(len(Q0))]).flatten()),
            value_units=list(np.array([['deg', '%']\
                             for i in range(len(Q0))]).flatten()))
        nested_MC.set_detector_function(d)
        # One asymmetry sweep function per gate, swept simultaneously.
        swfs = [swf.FLsweep(lm = lm,
                            par = lm.parameters[f'vcz_asymmetry_{directions[i][0]}'],
                            waveform_name = f'cz_{directions[i][0]}')
                for i, lm in enumerate(Flux_lm_0) ]
        swf1 = swf.multi_sweep_function(sweep_functions=swfs)
        nested_MC.set_sweep_function(swf1)
        nested_MC.set_sweep_points(Asymmetries)

        MC.live_plot_enabled(False)
        nested_MC.run(f'VCZ_asymmetry_sweep_{Q0}_{Q1}_{Q_parks}', mode='1D',
                      disable_snapshot_metadata=disable_metadata)
        MC.live_plot_enabled(True)
        a = ma2.tqg.VCZ_asymmetry_sweep_Analysis(label='VCZ_asymmetry_sweep')
        ################################
        # Update (or reset) flux params
        ################################
        for i, flux_lm in enumerate(Flux_lm_0):
            param = flux_lm.parameters[f'vcz_asymmetry_{directions[i][0]}']
            if update_params:
                param(a.qoi[f'asymmetry_opt_{i}'])
                print(f'Updated {param.name} to {a.qoi[f"asymmetry_opt_{i}"]*100:.3f}%')
            else:
                param(0)
                print(f'Reset {param.name} to 0%')

    def calibrate_cz_pad_samples(
        self,
        Q_ramsey: str,
        Q_control: str,
        flux_cw: str = 'cz',
        Sample_points: list = None,
        downsample_angle_points: int = 1,
        update: bool = True,
        prepare_for_timedomain: bool = True,
        disable_metadata: bool = False,
        extract_only: bool = True):
        """
        Calibrate the number of net-zero padding samples of the CZ flux pulse
        on Q_control by sweeping 'vcz_amp_pad_samples' while measuring the
        missing fraction of Q_control via a parity-check ramsey experiment.
        If `update`, the sample count minimizing the missing fraction is set.

        Q_ramsey : qubit used as ramsey (target) qubit.
        Q_control : qubit whose flux-pulse padding is swept.
        Sample_points : pad sample counts to sweep; defaults to
                        1 .. (pad time * sampling rate).
        """
        assert self.ro_acq_weight_type().lower() == 'optimal'
        MC = self.instr_MC.get_instr()
        nested_MC = self.instr_nested_MC.get_instr()
        # get gate directions of two-qubit gate codewords
        directions = get_gate_directions(Q_ramsey, Q_control)
        fl_lm = self.find_instrument(Q_control).instr_LutMan_Flux.get_instr()
        fl_par = f'vcz_amp_pad_samples_{directions[1]}'
        # Calculate sweep points
        _sample_rate = fl_lm.sampling_rate()
        _time_pad = fl_lm.get(f'vcz_time_pad_{directions[1]}')
        max_samples = int(_time_pad*_sample_rate)
        if Sample_points is None:
            Sample_points = np.arange(1, max_samples)
        # Prepare for timedomain
        if prepare_for_timedomain:
            self.prepare_for_timedomain(qubits=[Q_ramsey, Q_control])
            for q in [Q_ramsey]:
                mw_lm = self.find_instrument(q).instr_LutMan_MW.get_instr()
                mw_lm.set_default_lutmap()
                mw_lm.load_phase_pulses_to_AWG_lookuptable()

        # Wrapper function for parity check ramsey detector function.
        def wrapper(Q_target, Q_control,
                    flux_cw,
                    downsample_angle_points,
                    extract_only):
            a = self.measure_parity_check_ramsey(
                Q_target = [Q_target],
                Q_control = [Q_control],
                flux_cw_list = [flux_cw],
                control_cases = None,
                downsample_angle_points = downsample_angle_points,
                prepare_for_timedomain = False,
                pc_repetitions=1,
                solve_for_phase_gate_model = False,
                disable_metadata = True,
                extract_only = extract_only)
            pm = { f'Phase_model_{op}' : a['Phase_model'][Q_target][op]\
                  for op in a['Phase_model'][Q_target].keys()}
            mf = { f'missing_fraction_{q}' : a['Missing_fraction'][q]\
                  for q in [Q_control] }
            return { **pm, **mf}

        # n = len([Q_control])
        # Operators = ['{:0{}b}'.format(i, n).replace('0','I').replace('1','Z')\
        #              for i in range(2**n)]
        Operators = ['I', 'Z']
        d = det.Function_Detector(
            wrapper,
            msmt_kw={'Q_target' : Q_ramsey,
                     'Q_control' : Q_control,
                     'flux_cw': flux_cw,
                     'downsample_angle_points': downsample_angle_points,
                     'extract_only': extract_only},
            result_keys=[f'Phase_model_{op}' for op in Operators]+\
                        [f'missing_fraction_{Q_control}'],
            value_names=[f'Phase_model_{op}' for op in Operators]+\
                        [f'missing_fraction_{Q_control}'],
            value_units=['deg' for op in Operators]+\
                        ['fraction'])
        nested_MC.set_detector_function(d)
        # Set sweep function
        swf1 = swf.flux_make_pulse_netzero(
            flux_lutman = fl_lm,
            wave_id = f'cz_{directions[1]}')
        nested_MC.set_sweep_function(swf1)
        nested_MC.set_sweep_points(Sample_points)

        MC.live_plot_enabled(False)
        label = f'Pad_samples_calibration_{"_".join([Q_ramsey, Q_control])}'
        nested_MC.run(label, disable_snapshot_metadata=disable_metadata)
        # Run analysis
        a = ma2.Basic1DAnalysis(label=label)
        # get minimum missing fraction
        mf = a.raw_data_dict['measured_values_ord_dict'][f'missing_fraction_{Q_control}'][0]
        sample_points = a.raw_data_dict['xvals'][0]
        min_idx = np.argmin(mf)
        opt_val = sample_points[min_idx]
        if update:
            fl_lm.set(f'vcz_amp_pad_samples_{directions[1]}', opt_val)
        return a

    def measure_parity_check_fidelity(
        self,
        Q_ancilla: List[str],
        Q_control: List[str],
        flux_cw_list: List[str],
        control_cases: List[str] = None,
        prepare_for_timedomain: bool = True,
        initialization_msmt: bool = False,
        disable_metadata: bool = False,
        nr_shots_per_case: int = 2**12,
        wait_time_before_flux_ns: int = 0,
        wait_time_after_flux_ns: int = 0
        ):
        '''
        Measures parity check fidelity by preparing each control-qubit case
        (input basis state) and measuring the ancilla outcome.
        Note: When using heralded initialization, Q_control has
        to be given in ascending order
        '''
        assert self.ro_acq_weight_type().lower() == 'optimal'
        assert len(Q_ancilla) == 1
        MC = self.instr_MC.get_instr()
        if control_cases == None:
            control_cases = ['{:0{}b}'.format(i, len(Q_control))\
                             for i in range(2**len(Q_control))]
        else:
            for case in control_cases:
                assert len(case) == len(Q_control)
        qubit_list = Q_ancilla+Q_control
        if prepare_for_timedomain:
            self.prepare_for_timedomain(qubits=qubit_list)
            # NOTE(review): nesting reconstructed from a mangled source —
            # confirm this readout prep is inside the prepare branch.
            if not initialization_msmt:
                self.prepare_readout(qubits=Q_ancilla)

        Q_ancilla_idx = [self.find_instrument(q).cfg_qubit_nr()
                         for q in Q_ancilla]
        Q_control_idx = [self.find_instrument(q).cfg_qubit_nr()
                         for q in Q_control]
        p = mqo.parity_check_fidelity(
            Q_ancilla_idx = Q_ancilla_idx,
            Q_control_idx = Q_control_idx,
            control_cases = control_cases,
            flux_cw_list = flux_cw_list,
            initialization_msmt = initialization_msmt,
            wait_time_before_flux = wait_time_before_flux_ns,
            wait_time_after_flux = wait_time_after_flux_ns,
            platf_cfg = self.cfg_openql_platform_fn())
        s = swf.OpenQL_Sweep(
            openql_program=p,
            CCL=self.instr_CC.get_instr(),
            parameter_name="Cases",
            unit="a.u.")
        d = self.get_int_logging_detector(qubits=qubit_list)
        # +2 accounts for calibration points; with heralding each case is
        # measured twice and 2^(n+1) heralding calibration shots are added.
        total_shots = nr_shots_per_case*(len(control_cases)+2)
        if initialization_msmt:
            n = len(Q_control)
            total_shots = nr_shots_per_case*(len(control_cases)*2+2**(n+1))
        for detector in d.detectors:
            detector.nr_shots = total_shots
        MC.set_sweep_function(s)
        MC.set_sweep_points(np.arange(total_shots))
        MC.set_detector_function(d)
        label = f'Parity_check_fidelity_{"_".join(qubit_list)}'
        MC.run(label,disable_snapshot_metadata=disable_metadata)
        a = ma2.tqg.Parity_check_fidelity_analysis(
            label=label,
            Q_ancilla=Q_ancilla[0],
            Q_control=Q_control,
            control_cases=control_cases,
            post_selection=initialization_msmt)
        return a.qoi

    def measure_sandia_parity_benchmark(self,
                                        ancilla_qubit: str,
                                        data_qubits: list,
                                        flux_cw_list: list,
                                        wait_time_before_flux: int = 0,
                                        wait_time_after_flux: int = 0,
                                        prepare_for_timedomain:bool=True):
        """
        Run the Sandia parity benchmark on one ancilla and four data qubits,
        configuring multiplexed-readout resonator combinations per readout
        lutman before acquisition.
        """
        ###################
        # setup qubit idxs
        ###################
        all_qubits = [ancilla_qubit]+data_qubits
        ancilla_idx = self.find_instrument(ancilla_qubit).cfg_qubit_nr()
        data_idxs = [ self.find_instrument(q).cfg_qubit_nr() for q in data_qubits ]
        ###########################################
        # RO preparation (assign res_combinations)
        ###########################################
        RO_lms = np.unique([self.find_instrument(q).instr_LutMan_RO() for q in all_qubits])
        qubit_RO_lm = { self.find_instrument(q).cfg_qubit_nr() :
                        (self.find_instrument(q).name,
                         self.find_instrument(q).instr_LutMan_RO()) for q in all_qubits }
        main_qubits = []
        exception_qubits = []
        res_combs = {}
        for lm in RO_lms:
            res_combs[lm] = []
            comb = []
            for idx in data_idxs+[ancilla_idx]:
                if qubit_RO_lm[idx][1] == lm:
                    comb += [idx]
            res_combs[lm] += [comb]
            if qubit_RO_lm[ancilla_idx][1] == lm:
                # Ancilla also gets its own single-resonator combination.
                res_combs[lm] += [[ancilla_idx]]
                main_qubits = [qubit_RO_lm[idx][0] for idx in comb]
            else:
                exception_qubits += [qubit_RO_lm[idx][0] for idx in comb]
        # Time-domain preparation
        ordered_qubits = main_qubits+exception_qubits
        if prepare_for_timedomain:
            assert self.ro_acq_weight_type() == 'optimal'
            self.prepare_for_timedomain(ordered_qubits)
            for lm in RO_lms:
                ro_lm = self.find_instrument(lm)
                ro_lm.resonator_combinations(res_combs[lm])
                ro_lm.load_DIO_triggered_sequence_onto_UHFQC()
        ########################
        # SETUP MC and detector
        ########################
        # UHFQC shot-memory limit; shot count is rounded to a multiple of 5
        # (the number of segments in the benchmark sequence).
        uhfqc_max_avg = 2**17
        d = self.get_int_logging_detector(qubits=ordered_qubits, result_logging_mode='raw')
        for detector in d.detectors:
            detector.nr_shots = int(uhfqc_max_avg/5)*5
        p = mqo.Parity_Sandia_benchmark(qA=ancilla_idx,
                                        QDs=data_idxs,
                                        flux_cw_list=flux_cw_list,
                                        wait_time_before_flux=wait_time_before_flux,
                                        wait_time_after_flux=wait_time_after_flux,
                                        platf_cfg=self.cfg_openql_platform_fn())
        s = swf.OpenQL_Sweep(openql_program=p,
                             CCL=self.instr_CC.get_instr())
        MC = self.instr_MC.get_instr()
        MC.soft_avg(1)
        MC.live_plot_enabled(False)
        MC.set_sweep_function(s)
        MC.set_sweep_points(np.arange(int(uhfqc_max_avg/5)*5))
        MC.set_detector_function(d)
        MC.run(f"Sandia_parity_benchmark_{ancilla_qubit}_{data_qubits[0]}_{data_qubits[1]}_{data_qubits[2]}_{data_qubits[3]}")

        ma2.pba.Sandia_parity_benchmark(label='Sandia',
                                        ancilla_qubit=ancilla_qubit,
                                        data_qubits=data_qubits,
                                        exception_qubits=exception_qubits)

    def measure_weight_n_parity_tomography(
        self,
        ancilla_qubit: str,
        data_qubits: list,
        flux_cw_list: list,
        sim_measurement: bool,
        prepare_for_timedomain: bool=True,
        initialization_msmt: bool = False,
        repetitions: int=3,
        wait_time_before_flux: int = 0,
        wait_time_after_flux: int = 0,
        n_rounds = 1,
        disable_metadata=True,
        readout_duration_ns: int = 480
        ):
        """
        Measure weight-n parity tomography: run a parity check on the ancilla
        and perform tomographic readout (3^n pre-rotation bases) on the n data
        qubits, either simultaneously with or after the ancilla measurement.
        """
        assert self.ro_acq_weight_type().lower() == 'optimal'
        assert self.ro_acq_digitized() == False
        ###################
        # setup qubit idxs
        ###################
        n = len(data_qubits)
        all_qubits = [ancilla_qubit]+data_qubits
        ancilla_idx = self.find_instrument(ancilla_qubit).cfg_qubit_nr()
        data_idxs = [ self.find_instrument(q).cfg_qubit_nr() for q in data_qubits ]
        ###########################################
        # RO preparation (assign res_combinations)
        ###########################################
        RO_lms = np.unique([self.find_instrument(q).instr_LutMan_RO() for q in all_qubits])
        qubit_RO_lm = {self.find_instrument(q).cfg_qubit_nr() :
                       (self.find_instrument(q).name,
                        self.find_instrument(q).instr_LutMan_RO()) for q in all_qubits }
        main_qubits = []
        exception_qubits = []
        res_combs = {}
        for lm in RO_lms:
            res_combs[lm] = []
            comb1= []  # comb used for MUX of all qubits (final meas.)
            comb2= []  # comb used for MUX of just data qubits (final meas.)
            # ancilla + data qubits resonators
            for idx in [ancilla_idx]+data_idxs:
                if qubit_RO_lm[idx][1] == lm:
                    comb1+= [idx]
                    comb2+= [idx]
            res_combs[lm] += [comb1]
            if qubit_RO_lm[ancilla_idx][1] == lm:
                if not ([ancilla_idx] in res_combs[lm]):
                    res_combs[lm] += [[ancilla_idx]]  # comb of just anc. qubit
                comb2.remove(ancilla_idx)
                if comb2 != []:
                    res_combs[lm] += [comb2]
                main_qubits = [qubit_RO_lm[idx][0] for idx in comb1]
            else:
                exception_qubits += [qubit_RO_lm[idx][0] for idx in comb1]
        # Time-domain preparation
        ordered_qubits = main_qubits+exception_qubits
        if prepare_for_timedomain:
            assert self.ro_acq_weight_type() == 'optimal'
            self.prepare_for_timedomain(ordered_qubits)
            for lm in RO_lms:
                ro_lm = self.find_instrument(lm)
                ro_lm.resonator_combinations(res_combs[lm])
                ro_lm.load_DIO_triggered_sequence_onto_UHFQC()
        # Sequence is compiled with qubit order
        # matching the detector order.
        ord_data_qubit_idxs = [self.find_instrument(q).cfg_qubit_nr()
                               for q in ordered_qubits[1:]]
        exc_qubit_idxs = [self.find_instrument(q).cfg_qubit_nr()
                          for q in exception_qubits]
        p = mqo.Weight_n_parity_tomography(
            Q_anc=ancilla_idx,
            Q_D=ord_data_qubit_idxs,
            flux_cw_list=flux_cw_list,
            Q_exception=exc_qubit_idxs,
            simultaneous_measurement=sim_measurement,
            initialization_msmt = initialization_msmt,
            wait_time_before_flux=wait_time_before_flux,
            wait_time_after_flux=wait_time_after_flux,
            n_rounds = n_rounds,
            readout_duration_ns=readout_duration_ns,
            platf_cfg=self.cfg_openql_platform_fn())

        # Shots per MC iteration: 3^n tomography bases per round plus
        # 2^(n+1) calibration points; doubled when heralding is used.
        uhfqc_max_avg = 2**17
        if sim_measurement:
            readouts_per_round = (3**n)*n_rounds+2**(n+1)
            if initialization_msmt:
                readouts_per_round = ((3**n)*n_rounds)*2+2**(n+1)
        else:
            readouts_per_round = (3**n)*(n_rounds+1)+2**(n+1)
            if initialization_msmt:
                readouts_per_round = ((3**n)*(n_rounds+1))*2+2**(n+1)

        d = self.get_int_logging_detector(qubits=all_qubits,
                                          result_logging_mode='raw')
        for det in d.detectors:
            det.nr_shots = int(uhfqc_max_avg/readouts_per_round)*readouts_per_round
        s = swf.OpenQL_Sweep(openql_program=p,
                             CCL=self.instr_CC.get_instr())
        MC = self.instr_MC.get_instr()
        MC.soft_avg(1)
        MC.live_plot_enabled(False)
        MC.set_sweep_function(s)
        MC.set_sweep_points(np.arange(int(uhfqc_max_avg/readouts_per_round)
                            * readouts_per_round * repetitions))
        MC.set_detector_function(d)
        MC.run(f'Weight_{n}_parity_tomography_{ancilla_qubit}_'+\
               f'{"_".join(data_qubits)}_sim-msmt-{sim_measurement}'+\
               f'_rounds-{n_rounds}',disable_snapshot_metadata=disable_metadata)
        # Warning analysis requires that the detector function is ordered
        # as: [anc_qubit, data_qubit[0],[1],[2],[3]]
        ma2.pba.Weight_n_parity_tomography(
            sim_measurement=sim_measurement,
            n_rounds=n_rounds,
            exception_qubits=exception_qubits,
            post_selection=initialization_msmt)

    ################################################
    # Surface-17 specific functions
################################################ + def measure_defect_rate( + self, + ancilla_qubit: str, + data_qubits: list, + experiments: list, + lru_qubits: list = None, + Rounds: list = [1, 2, 4, 6, 10, 15, 25, 50], + repetitions: int = 20, + prepare_for_timedomain: bool = True, + prepare_readout: bool = True, + heralded_init: bool = True, + stabilizer_type: str = 'X', + initial_state_qubits: list = None, + measurement_time_ns: int = 500, + analyze: bool = True, + Pij_matrix: bool = True, + ): + # assert self.ro_acq_weight_type() == 'optimal IQ' + assert self.ro_acq_digitized() == False + Valid_experiments = ['single_stabilizer', 'single_stabilizer_LRU', + 'surface_13', 'surface_13_LRU', 'surface_17', + 'repetition_code'] + for exp in experiments: + assert exp in Valid_experiments, f'Experiment {exp} not a valid experiment' + number_of_kernels = len(experiments) + # Surface-17 qubits + X_ancillas = ['X1', 'X2', 'X3', 'X4'] + Z_ancillas = ['Z1', 'Z2', 'Z3', 'Z4'] + Data_qubits = ['D1', 'D2', 'D3', 'D4', 'D5', 'D6', 'D7', 'D8', 'D9'] + X_anci_idxs = [ self.find_instrument(q).cfg_qubit_nr() for q in X_ancillas ] + Z_anci_idxs = [ self.find_instrument(q).cfg_qubit_nr() for q in Z_ancillas ] + Data_idxs = [ self.find_instrument(q).cfg_qubit_nr() for q in Data_qubits ] + ancilla_qubit_idx = self.find_instrument(ancilla_qubit).cfg_qubit_nr() + data_qubits_idx = [self.find_instrument(q).cfg_qubit_nr() for q in data_qubits] + if lru_qubits: + lru_qubits_idxs = [self.find_instrument(q).cfg_qubit_nr() for q in lru_qubits] + else: + lru_qubits_idxs = [] + ###################################################### + # Prepare for timedomain + ###################################################### + # prepare mw lutmans + # for q in [ancilla_qubit]+data_qubits: + for q in Data_qubits + X_ancillas + Z_ancillas: + mw_lm = self.find_instrument(f'MW_lutman_{q}') + mw_lm.set_default_lutmap() + mw_lm.load_waveforms_onto_AWG_lookuptable() + + if prepare_for_timedomain: + # Redundancy 
just to be sure we are uploading every parameter + # for q_name in data_qubits+[ancilla_qubit]: + for q_name in Data_qubits+X_ancillas+Z_ancillas: + q = self.find_instrument(q_name) + q.prepare_for_timedomain() + self.prepare_for_timedomain(qubits=Data_qubits+X_ancillas+Z_ancillas, + prepare_for_readout=False) + if (prepare_for_timedomain or prepare_readout): + ################################################## + # Prepare acquisition with custom channel map + ################################################## + # Need to create ordered list of experiment qubits + # and remaining ancilla qubits + ordered_qubit_dict = {} + # _qubits = [ancilla_qubit]+data_qubits + _qubits = [ancilla_qubit]+Data_qubits + # Add qubits in experiment + for _q in _qubits: + acq_instr = self.find_instrument(_q).instr_acquisition() + if acq_instr not in ordered_qubit_dict.keys():\ + ordered_qubit_dict[acq_instr] = [_q] + else: + ordered_qubit_dict[acq_instr].append(_q) + # Add remaining ancilla qubits + _remaining_ancillas = X_ancillas + Z_ancillas + _remaining_ancillas.remove(ancilla_qubit) + _remaining_ancillas.remove('X4') + for _q in _remaining_ancillas: + acq_instr = self.find_instrument(_q).instr_acquisition() + if acq_instr not in ordered_qubit_dict.keys():\ + ordered_qubit_dict[acq_instr] = [_q] + else: + ordered_qubit_dict[acq_instr].append(_q) + ordered_qubit_list = [ x for v in ordered_qubit_dict.values() for x in v ] + # ordered_chan_map = {q:'optimal IQ' if q in _qubits else 'optimal'\ + # for q in ordered_qubit_list} + ordered_chan_map = {q:'optimal IQ' if q in _qubits+_remaining_ancillas else 'optimal'\ + for q in ordered_qubit_list} + print(ordered_qubit_list) + print(ordered_chan_map) + ## expect IQ mode for D8 & D9 [because we have 6 qubits in this feedline] + # if 'D8' in ordered_chan_map.keys() and 'D9' in ordered_chan_map.keys(): + # ordered_chan_map['D8'] = 'optimal' + # ordered_chan_map['D9'] = 'optimal' + self.ro_acq_weight_type('custom') + 
self.prepare_readout(qubits=ordered_qubit_list, + qubit_int_weight_type_dict=ordered_chan_map) + ################################################## + # Prepare readout pulses with custom channel map + ################################################## + RO_lutman_1 = self.find_instrument('RO_lutman_1') + RO_lutman_2 = self.find_instrument('RO_lutman_2') + RO_lutman_3 = self.find_instrument('RO_lutman_3') + RO_lutman_4 = self.find_instrument('RO_lutman_4') + if [11] not in RO_lutman_1.resonator_combinations(): + RO_lutman_1.resonator_combinations([[11], + RO_lutman_1.resonator_combinations()[0]]) + RO_lutman_1.load_waveforms_onto_AWG_lookuptable() + + if [3, 7] not in RO_lutman_2.resonator_combinations(): + RO_lutman_2.resonator_combinations([[3, 7], + RO_lutman_2.resonator_combinations()[0]]) + RO_lutman_2.load_waveforms_onto_AWG_lookuptable() + + if [8, 12] not in RO_lutman_4.resonator_combinations(): + RO_lutman_4.resonator_combinations([[8, 12], + RO_lutman_4.resonator_combinations()[0]]) + RO_lutman_4.load_waveforms_onto_AWG_lookuptable() + + # if [9, 14, 10] not in RO_lutman_3.resonator_combinations(): + # RO_lutman_3.resonator_combinations([[9, 14, 10], + # RO_lutman_3.resonator_combinations()[0]]) + # RO_lutman_3.load_waveforms_onto_AWG_lookuptable() + if [14, 10] not in RO_lutman_3.resonator_combinations(): + RO_lutman_3.resonator_combinations([[14, 10], + RO_lutman_3.resonator_combinations()[0]]) + RO_lutman_3.load_waveforms_onto_AWG_lookuptable() + # Generate compiler sequence + p = mqo.repeated_stabilizer_data_measurement_sequence( + target_stab = ancilla_qubit, + Q_anc = ancilla_qubit_idx, + Q_D = data_qubits_idx, + X_anci_idxs = X_anci_idxs, + Z_anci_idxs = Z_anci_idxs, + data_idxs = Data_idxs, + lru_idxs = lru_qubits_idxs, + platf_cfg = self.cfg_openql_platform_fn(), + experiments = experiments, + Rounds = Rounds, + stabilizer_type=stabilizer_type, + initial_state_qubits=initial_state_qubits, + measurement_time_ns=measurement_time_ns) + # Set up 
nr_shots on detector + d = self.int_log_det + uhfqc_max_avg = 2**19 + for det in d.detectors: + readouts_per_round = np.sum(np.array(Rounds)+heralded_init)*number_of_kernels\ + + 3*(1+heralded_init) + det.nr_shots = int(uhfqc_max_avg/readouts_per_round)*readouts_per_round + + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr()) + MC = self.instr_MC.get_instr() + MC.soft_avg(1) + MC.live_plot_enabled(False) + MC.set_sweep_function(s) + MC.set_sweep_points(np.arange(int(uhfqc_max_avg/readouts_per_round) + * readouts_per_round * repetitions)) + MC.set_detector_function(d) + if initial_state_qubits: + _title = f'Surface_13_experiment_{"_".join([str(r) for r in Rounds])}rounds'+\ + f'_excited_qubits_{"_".join(initial_state_qubits)}' + else: + _title = f'Repeated_stab_meas_{"_".join([str(r) for r in Rounds])}rounds'+\ + f'_{ancilla_qubit}_{data_qubits}_data_qubit_measurement' + if len(_title) > 96: + _title = _title[:96] # this is to avoid failure in creating hdf5 file. + try: + MC.run(_title) + a = None + if analyze: + a = ma2.pba.Repeated_stabilizer_measurements( + ancilla_qubit=ancilla_qubit, + data_qubits = data_qubits, + Rounds=Rounds, + heralded_init=heralded_init, + number_of_kernels=number_of_kernels, + experiments=experiments, + Pij_matrix=Pij_matrix, + label=_title) + self.ro_acq_weight_type('optimal') + except: + print_exception() + self.ro_acq_weight_type('optimal') + raise ValueError('Somtehing happened!') + return a + + def measure_repetition_code_defect_rate( + self, + involved_ancilla_ids: List[str], + involved_data_ids: List[str], + rounds: list = [1, 2, 4, 6, 10, 15, 25, 50], + repetitions: int = 20, + prepare_for_timedomain: bool = True, + prepare_readout: bool = True, + heralded_init: bool = True, + stabilizer_type: str = 'X', + measurement_time_ns: int = 500, + analyze: bool = True, + Pij_matrix: bool = True, + disable_metadata: bool = False, + initial_state: list = None, + ): + # assert self.ro_acq_weight_type() == 'optimal IQ' + 
assert self.ro_acq_digitized() == False + + # Surface-17 qubits + ancilla_x_names = ['X1', 'X2', 'X3', 'X4'] + ancilla_z_names = ['Z1', 'Z2', 'Z3', 'Z4'] + data_names = ['D1', 'D2', 'D3', 'D4', 'D5', 'D6', 'D7', 'D8', 'D9'] + ancilla_x_indices: List[int] = [ self.find_instrument(q).cfg_qubit_nr() for q in ancilla_x_names ] + ancilla_z_indices: List[int] = [ self.find_instrument(q).cfg_qubit_nr() for q in ancilla_z_names ] + all_ancilla_indices: List[int] = ancilla_x_indices + ancilla_z_indices + all_data_indices: List[int] = [ self.find_instrument(q).cfg_qubit_nr() for q in data_names ] + + involved_ancilla_indices = [self.find_instrument(q).cfg_qubit_nr() for q in involved_ancilla_ids] + involved_data_indices = [self.find_instrument(q).cfg_qubit_nr() for q in involved_data_ids] + lru_qubits_indices = [] + if initial_state is None: + initial_state: InitialStateContainer = InitialStateContainer.from_ordered_list( + [InitialStateEnum.ZERO] * len(involved_data_ids) + ) + else: + initial_state: InitialStateContainer = InitialStateContainer.from_ordered_list([ + InitialStateEnum.ZERO if state == 0 else InitialStateEnum.ONE + for state in initial_state + ]) + ###################################################### + # Prepare for timedomain + ###################################################### + def internal_prepare_for_timedomain(): + """:return: Void.""" + # prepare mw lutmans + # for q in [ancilla_qubit]+data_qubits: + for q in data_names + ancilla_x_names + ancilla_z_names: + mw_lm = self.find_instrument(f'MW_lutman_{q}') + mw_lm.set_default_lutmap() + mw_lm.load_waveforms_onto_AWG_lookuptable() + + if prepare_for_timedomain: + # Redundancy just to be sure we are uploading every parameter + # for q_name in data_qubits+[ancilla_qubit]: + for q_name in data_names+ancilla_x_names+ancilla_z_names: + q = self.find_instrument(q_name) + q.prepare_for_timedomain() + self.prepare_for_timedomain(qubits=data_names+ancilla_x_names+ancilla_z_names, prepare_for_readout=False) + 
if (prepare_for_timedomain or prepare_readout): + ################################################## + # Prepare acquisition with custom channel map + ################################################## + # Need to create ordered list of experiment qubits + # and remaining ancilla qubits + ordered_qubit_dict = {} + # _qubits = [ancilla_qubit]+data_qubits + _qubits = involved_ancilla_ids + data_names + # Add qubits in experiment + for _q in _qubits: + acq_instr = self.find_instrument(_q).instr_acquisition() + if acq_instr not in ordered_qubit_dict.keys():\ + ordered_qubit_dict[acq_instr] = [_q] + else: + ordered_qubit_dict[acq_instr].append(_q) + # Add remaining ancilla qubits + _remaining_ancillas = ancilla_x_names + ancilla_z_names + for involved_ancilla_id in involved_ancilla_ids: + _remaining_ancillas.remove(involved_ancilla_id) + + _remaining_ancillas.remove('X4') + + for _q in _remaining_ancillas: + acq_instr = self.find_instrument(_q).instr_acquisition() + if acq_instr not in ordered_qubit_dict.keys():\ + ordered_qubit_dict[acq_instr] = [_q] + else: + ordered_qubit_dict[acq_instr].append(_q) + ordered_qubit_list = [ x for v in ordered_qubit_dict.values() for x in v ] + # ordered_chan_map = {q:'optimal IQ' if q in _qubits else 'optimal'\ + # for q in ordered_qubit_list} + ordered_chan_map = {q:'optimal IQ' if q in _qubits+_remaining_ancillas else 'optimal'\ + for q in ordered_qubit_list} + print(ordered_qubit_list) + print(ordered_chan_map) + ## expect IQ mode for D8 & D9 [because we have 6 qubits in this feedline] + # if 'D8' in ordered_chan_map.keys() and 'D9' in ordered_chan_map.keys(): + # ordered_chan_map['D8'] = 'optimal' + # ordered_chan_map['D9'] = 'optimal' + self.ro_acq_weight_type('custom') + self.prepare_readout(qubits=ordered_qubit_list, + qubit_int_weight_type_dict=ordered_chan_map) + ################################################## + # Prepare readout pulses with custom channel map + ################################################## + 
RO_lutman_1 = self.find_instrument('RO_lutman_1') + RO_lutman_2 = self.find_instrument('RO_lutman_2') + RO_lutman_3 = self.find_instrument('RO_lutman_3') + RO_lutman_4 = self.find_instrument('RO_lutman_4') + if [11] not in RO_lutman_1.resonator_combinations(): + RO_lutman_1.resonator_combinations([[11], + RO_lutman_1.resonator_combinations()[0]]) + RO_lutman_1.load_waveforms_onto_AWG_lookuptable() + + if [3, 7] not in RO_lutman_2.resonator_combinations(): + RO_lutman_2.resonator_combinations([[3, 7], + RO_lutman_2.resonator_combinations()[0]]) + RO_lutman_2.load_waveforms_onto_AWG_lookuptable() + + if [8, 12] not in RO_lutman_4.resonator_combinations(): + RO_lutman_4.resonator_combinations([[8, 12], + RO_lutman_4.resonator_combinations()[0]]) + RO_lutman_4.load_waveforms_onto_AWG_lookuptable() + + # if [9, 14, 10] not in RO_lutman_3.resonator_combinations(): + # RO_lutman_3.resonator_combinations([[9, 14, 10], + # RO_lutman_3.resonator_combinations()[0]]) + # RO_lutman_3.load_waveforms_onto_AWG_lookuptable() + if [14, 10] not in RO_lutman_3.resonator_combinations(): + RO_lutman_3.resonator_combinations([[14, 10], + RO_lutman_3.resonator_combinations()[0]]) + RO_lutman_3.load_waveforms_onto_AWG_lookuptable() + # TODO: This should be refactored in a more general approach that handles all the fun things related to (timedomain) readout. 
+ internal_prepare_for_timedomain() + + # Generate compiler sequence + p = mqo.repetition_code_sequence_old( + involved_ancilla_indices=involved_ancilla_indices, + involved_data_indices=involved_data_indices, + all_ancilla_indices=all_ancilla_indices, + all_data_indices=all_data_indices, + array_of_round_number=rounds, + platf_cfg=self.cfg_openql_platform_fn(), + stabilizer_type=stabilizer_type, + measurement_time_ns=measurement_time_ns, + initial_state=list(initial_state.as_array) + ) + # Set up nr_shots on detector + d = self.int_log_det + uhfqc_max_avg = 2**19 # 2**19 + number_of_kernels: int = 1 # Performing only a single experiment + for det in d.detectors: + nr_acquisitions = [1 if round == 0 else round for round in rounds] + readouts_per_round = np.sum(np.array(nr_acquisitions)+heralded_init) * number_of_kernels + 3*(1+heralded_init) + det.nr_shots = int(uhfqc_max_avg/readouts_per_round)*readouts_per_round + + s = swf.OpenQL_Sweep(openql_program=p, CCL=self.instr_CC.get_instr()) + MC = self.instr_MC.get_instr() + MC.soft_avg(1) + MC.live_plot_enabled(False) + MC.set_sweep_function(s) + MC.set_sweep_points(np.arange(int(uhfqc_max_avg/readouts_per_round) * readouts_per_round * repetitions)) + MC.set_detector_function(d) + + rounds_title: str = f"{rounds[0]}_to_{rounds[-1]}" + involved_qubits_title: str = ''.join(involved_ancilla_ids) + initial_state_title: str = ''.join(initial_state.as_array.astype(str)) + _title = f'Repeated_stab_meas_{rounds_title}_rounds_{involved_qubits_title}_qubits_{initial_state_title}_state' + if len(_title) > 90: + _title = _title[:90] + try: + MC.run(_title, disable_snapshot_metadata=disable_metadata) + a = None + if analyze: + # a = ma2.pba.Repeated_stabilizer_measurements( + # ancilla_qubit=involved_ancilla_ids, + # data_qubits = involved_data_ids, + # Rounds=rounds, + # heralded_init=heralded_init, + # number_of_kernels=number_of_kernels, + # experiments=['repetition_code'], + # Pij_matrix=Pij_matrix, + # label=_title) + + 
fillvalue = None + involved_qubit_names: List[str] = [ + item + for pair in itt.zip_longest(involved_data_ids, involved_ancilla_ids, fillvalue=fillvalue) for item in pair if item != fillvalue + ] + a = RepeatedStabilizerAnalysis( + involved_qubit_names=involved_qubit_names, + qec_cycles=rounds, + initial_state=initial_state, + label=_title, + ) + a.run_analysis() + self.ro_acq_weight_type('optimal') + except: + print_exception() + self.ro_acq_weight_type('optimal') + raise ValueError('Somtehing happened!') + return a diff --git a/pycqed/instrument_drivers/meta_instrument/lfilt_kernel_object.py b/pycqed/instrument_drivers/meta_instrument/lfilt_kernel_object.py index 2c3f754924..97f9ec79a5 100644 --- a/pycqed/instrument_drivers/meta_instrument/lfilt_kernel_object.py +++ b/pycqed/instrument_drivers/meta_instrument/lfilt_kernel_object.py @@ -100,7 +100,7 @@ def set_unused_realtime_distortions_zero(self): amplitude to zero. This method of disabling is used so as not to change the latency that is introduced. """ - max_exp_filters = 5 + max_exp_filters = 8 try: AWG = self.instr_AWG.get_instr() except Exception as e: @@ -230,7 +230,7 @@ def distort_waveform( ) nr_real_time_exp_models += 1 - if nr_real_time_exp_models > 5: + if nr_real_time_exp_models > 8: # changed 5 to 8 Jorge raise ValueError() else: y_sig = kf.exponential_decay_correction( diff --git a/pycqed/instrument_drivers/meta_instrument/qubit_objects/CCL_Transmon.py b/pycqed/instrument_drivers/meta_instrument/qubit_objects/CCL_Transmon.py index b2ff1876e5..ac1779ed0d 100644 --- a/pycqed/instrument_drivers/meta_instrument/qubit_objects/CCL_Transmon.py +++ b/pycqed/instrument_drivers/meta_instrument/qubit_objects/CCL_Transmon.py @@ -1,5 +1,7834 @@ -""" -This file provides compatibility for existing code. 
The functionality of this file had been moved to HAL_Transmon.py -""" -# these imports just rename the new names to the legacy names -from .HAL_Transmon import HAL_Transmon as CCLight_Transmon \ No newline at end of file + +import time +import logging +import numpy as np +from typing import List, Optional, Union +from autodepgraph import AutoDepGraph_DAG + +import pycqed +from pycqed.measurement.openql_experiments import single_qubit_oql as sqo +import pycqed.measurement.openql_experiments.multi_qubit_oql as mqo +from pycqed.measurement.openql_experiments import clifford_rb_oql as cl_oql +from pycqed.measurement.openql_experiments import pygsti_oql +from pycqed.measurement.openql_experiments import openql_helpers as oqh +from pycqed.analysis.tools import cryoscope_tools as ct +from pycqed.analysis import analysis_toolbox as a_tools +from pycqed.analysis.tools import plotting as plt_tools +from pycqed.utilities.general import gen_sweep_pts, print_exception +from pycqed.utilities.learnerND_optimize import LearnerND_Optimize, \ + mk_optimize_res_loss_func +from pycqed.utilities.learnerND_minimizer import LearnerND_Minimizer, \ + mk_minimization_loss_func, mk_minimization_goal_func, mk_min_threshold_goal_func + +from .qubit_object import Qubit +from qcodes.utils import validators as vals +from qcodes.instrument.parameter import ( + ManualParameter, InstrumentRefParameter) +from pycqed.analysis import measurement_analysis as ma +from pycqed.analysis_v2 import measurement_analysis as ma2 +from pycqed.measurement import calibration_toolbox as cal_toolbox +from pycqed.measurement.openql_experiments.openql_helpers import \ + load_range_of_oql_programs, load_range_of_oql_programs_from_filenames +from pycqed.measurement import sweep_functions as swf +from pycqed.measurement import detector_functions as det +from pycqed.measurement.mc_parameter_wrapper import wrap_par_to_swf +import pycqed.measurement.composite_detector_functions as cdf +from pycqed.utilities import 
learner1D_minimizer as l1dm + +import pytest +from math import ceil + +import cma +from pycqed.measurement.optimization import nelder_mead +import datetime +import multiprocessing +import warnings + +# Imported for a type check +from pycqed.instrument_drivers.physical_instruments.QuTech_AWG_Module \ + import QuTech_AWG_Module + +log = logging.getLogger(__name__) + + +class CCLight_Transmon(Qubit): + + """ + The CCLight_Transmon + Setup configuration: + Drive: CCLight controlling AWG8's and a VSM + Acquisition: UHFQC + Readout pulse configuration: LO modulated using UHFQC AWG + """ + + def __init__(self, name, **kw): + t0 = time.time() + super().__init__(name, **kw) + self.add_parameters() + self.connect_message(begin_time=t0) + + def add_instrument_ref_parameters(self): + self.add_parameter('instr_device', + docstring='Represents sample, contains all qubits ' + 'and resonators', + parameter_class=InstrumentRefParameter) + # MW sources + self.add_parameter('instr_LO_ro', + parameter_class=InstrumentRefParameter) + self.add_parameter('instr_LO_mw', + parameter_class=InstrumentRefParameter) + self.add_parameter('instr_LO_LRU', + parameter_class=InstrumentRefParameter) + self.add_parameter('instr_spec_source', + parameter_class=InstrumentRefParameter) + self.add_parameter('instr_spec_source_2', + parameter_class=InstrumentRefParameter) + + # Control electronics + self.add_parameter( + 'instr_CC', label='Central Controller', + docstring=('Device responsible for controlling the experiment' + ' using eQASM generated using OpenQL, in the near' + ' future will be the CC_Light.'), + parameter_class=InstrumentRefParameter) + self.add_parameter('instr_acquisition', + parameter_class=InstrumentRefParameter) + self.add_parameter('instr_VSM', label='Vector Switch Matrix', + parameter_class=InstrumentRefParameter) + + self.add_parameter('instr_MC', label='MeasurementControl', + parameter_class=InstrumentRefParameter) + + self.add_parameter('instr_nested_MC', + label='Nested 
MeasurementControl', + parameter_class=InstrumentRefParameter) + self.add_parameter('instr_SH', label='SignalHound', + parameter_class=InstrumentRefParameter) + self.add_parameter( + 'instr_FluxCtrl', label='Flux control', docstring=( + 'Instrument used to control flux can either be an IVVI rack ' + 'or a meta instrument such as the Flux control.'), + parameter_class=InstrumentRefParameter) + + self.add_parameter('instr_VNA', + docstring='Vector Network Analyzer', + parameter_class=InstrumentRefParameter, + initial_value=None) + # LutMan's + self.add_parameter('instr_LutMan_MW', + docstring='Lookuptable manager for ' + 'microwave control pulses.', + parameter_class=InstrumentRefParameter) + self.add_parameter('instr_LutMan_RO', + docstring='Lookuptable manager responsible for ' + 'microwave readout pulses.', + parameter_class=InstrumentRefParameter) + self.add_parameter('instr_LutMan_Flux', + docstring='Lookuptable manager responsible for ' + 'flux pulses.', + initial_value=None, + parameter_class=InstrumentRefParameter) + self.add_parameter('instr_LutMan_LRU', + docstring='Lookuptable manager responsible for ' + 'LRU pulses.', + initial_value=None, + parameter_class=InstrumentRefParameter) + + def add_ro_parameters(self): + """ + Adding the parameters relevant for readout. 
+ """ + ################################ + # RO stimulus/pulse parameters # + ################################ + self.add_parameter('ro_freq', + label='Readout frequency', unit='Hz', + parameter_class=ManualParameter) + self.add_parameter('ro_freq_mod', + label='Readout-modulation frequency', unit='Hz', + initial_value=-20e6, + parameter_class=ManualParameter) + self.add_parameter('ro_pow_LO', label='RO power LO', + unit='dBm', initial_value=20, + parameter_class=ManualParameter) + + # RO pulse parameters + self.add_parameter('ro_pulse_type', initial_value='simple', + vals=vals.Enum('gated', 'simple', + 'up_down_down', 'up_down_down_final'), + parameter_class=ManualParameter) + + # Mixer offsets correction, RO pulse + self.add_parameter('ro_pulse_mixer_offs_I', unit='V', + parameter_class=ManualParameter, initial_value=0) + self.add_parameter('ro_pulse_mixer_offs_Q', unit='V', + parameter_class=ManualParameter, initial_value=0) + self.add_parameter('ro_pulse_mixer_alpha', initial_value=1, + parameter_class=ManualParameter) + self.add_parameter('ro_pulse_mixer_phi', initial_value=0, + parameter_class=ManualParameter) + + self.add_parameter('ro_pulse_length', + label='Readout pulse length', + initial_value=100e-9, + unit='s', + parameter_class=ManualParameter) + self.add_parameter('ro_pulse_amp', unit='V', + label='Readout pulse amplitude', + initial_value=0.1, + parameter_class=ManualParameter) + self.add_parameter('ro_pulse_amp_CW', unit='V', + label='Readout pulse amplitude', + initial_value=0.1, + parameter_class=ManualParameter) + self.add_parameter('ro_pulse_phi', unit='deg', initial_value=0, + parameter_class=ManualParameter) + + self.add_parameter('ro_pulse_down_length0', unit='s', + initial_value=1e-9, + parameter_class=ManualParameter) + self.add_parameter('ro_pulse_down_amp0', unit='V', initial_value=0, + parameter_class=ManualParameter) + self.add_parameter('ro_pulse_down_phi0', unit='deg', initial_value=0, + parameter_class=ManualParameter) + 
self.add_parameter('ro_pulse_down_length1', unit='s', + initial_value=1e-9, + parameter_class=ManualParameter) + self.add_parameter('ro_pulse_down_amp1', unit='V', initial_value=0, + parameter_class=ManualParameter) + self.add_parameter('ro_pulse_down_phi1', unit='deg', initial_value=0, + parameter_class=ManualParameter) + + ############################# + # RO acquisition parameters # + ############################# + + ro_acq_docstr = ( + 'Determines what type of integration weights to use: ' + '\n\t SSB: Single sideband demodulation\n\t' + 'DSB: Double sideband demodulation\n\t' + 'optimal: waveforms specified in "RO_acq_weight_func_I" ' + '\n\tand "RO_acq_weight_func_Q"') + + self.add_parameter('ro_acq_weight_type', + initial_value='SSB', + vals=vals.Enum( + 'SSB', 'DSB', 'optimal', 'optimal IQ', 'optimal IQ2'), + docstring=ro_acq_docstr, + parameter_class=ManualParameter) + + self.add_parameter( + 'ro_acq_weight_chI', initial_value=0, docstring=( + 'Determines the I-channel for integration. 
When the' + ' ro_acq_weight_type is optimal only this channel will ' + 'affect the result.'), vals=vals.Ints(0, 9), + parameter_class=ManualParameter) + self.add_parameter( + 'ro_acq_weight_chQ', initial_value=1, docstring=( + 'Determines the Q-channel for integration.'), + vals=vals.Ints(0, 9), parameter_class=ManualParameter) + + self.add_parameter('ro_acq_weight_func_I', + vals=vals.Arrays(), + label='Optimized weights for I channel', + parameter_class=ManualParameter) + self.add_parameter('ro_acq_weight_func_Q', + vals=vals.Arrays(), + label='Optimized weights for Q channel', + parameter_class=ManualParameter) + # Additional weight functions for readout of 2 state + self.add_parameter('ro_acq_weight_func_I_2', + vals=vals.Arrays(), + label='Optimized weights for I channel', + parameter_class=ManualParameter) + self.add_parameter('ro_acq_weight_func_Q_2', + vals=vals.Arrays(), + label='Optimized weights for Q channel', + parameter_class=ManualParameter) + + # FIXME!: Dirty hack because of qusurf issue #63, added 2 hardcoded + # delay samples in the optimized weights + self.add_parameter('ro_acq_weight_func_delay_samples_hack', + vals=vals.Ints(), + initial_value=0, + label='weight function delay samples', + parameter_class=ManualParameter) + + self.add_parameter( + 'ro_acq_delay', unit='s', + label='Readout acquisition delay', + vals=vals.Numbers(min_value=0), + initial_value=0, + parameter_class=ManualParameter, + docstring=('The time between the instruction that trigger the' + ' readout pulse and the instruction that triggers the ' + 'acquisition. 
The positive number means that the ' + 'acquisition is started after the pulse is send.')) + self.add_parameter( + 'ro_pulse_delay', unit='s', + label='Readout acquisition delay', + vals=vals.Numbers(0, 1e-6), + initial_value=0, + parameter_class=ManualParameter, + docstring=('The delay time for the readout pulse')) + + self.add_parameter( + 'ro_acq_mixer_phi', unit='degree', + label='Readout mixer phi', + vals=vals.Numbers(), + initial_value=0, + parameter_class=ManualParameter, + docstring=('acquisition mixer phi, used for mixer deskewing in' + 'real time')) + + self.add_parameter( + 'ro_acq_mixer_alpha', unit='', + label='Readout mixer alpha', + vals=vals.Numbers(min_value=0.8), + initial_value=1, + parameter_class=ManualParameter, + docstring=('acquisition mixer alpha, used for mixer deskewing in' + 'real time')) + + self.add_parameter( + 'ro_acq_input_average_length', unit='s', + label='Readout acquisition delay', + vals=vals.Numbers(min_value=0, max_value=4096/1.8e9), + initial_value=4096/1.8e9, + parameter_class=ManualParameter, + docstring=('The measurement time in input averaging.')) + + self.add_parameter('ro_acq_integration_length', initial_value=500e-9, + vals=vals.Numbers( + min_value=0, max_value=4096/1.8e9), + parameter_class=ManualParameter) + + self.add_parameter('ro_acq_averages', initial_value=1024, + vals=vals.Numbers(min_value=0, max_value=1e6), + parameter_class=ManualParameter) + + self.add_parameter('ro_soft_avg', initial_value=1, + docstring=('Number of soft averages to be ' + 'performed using the MC.'), + vals=vals.Ints(min_value=1), + parameter_class=ManualParameter) + + # self.add_parameter('ro_power_cw', label='RO power cw', + # unit='dBm', + # parameter_class=ManualParameter) + + # Single shot readout specific parameters + self.add_parameter('ro_acq_digitized', vals=vals.Bool(), + initial_value=False, + parameter_class=ManualParameter) + self.add_parameter('ro_acq_threshold', unit='dac-value', + initial_value=0, + 
parameter_class=ManualParameter) + self.add_parameter('ro_acq_rotated_SSB_when_optimal', vals=vals.Bool(), + docstring=( + 'bypasses optimal weights, and uses rotated SSB instead'), + initial_value=False, + parameter_class=ManualParameter) + self.add_parameter('ro_acq_rotated_SSB_rotation_angle', vals=vals.Numbers( + min_value=-np.pi, max_value=np.pi), + docstring=( + 'uses this as the rotation angle for rotated SSB'), + initial_value=0, + parameter_class=ManualParameter) + self.add_parameter('ro_acq_integration_length_weigth_function', vals=vals.Numbers( + min_value=0, max_value=4096/1.8e9), + docstring=( + 'sets weight function elements to 0 beyond this time'), + initial_value=4096/1.8e9, + parameter_class=ManualParameter) + + # self.add_parameter('cal_pt_zero', + # initial_value=None, + # vals=vals.Anything(), # should be a tuple validator + # label='Calibration point |0>', + # parameter_class=ManualParameter) + # self.add_parameter('cal_pt_one', + # initial_value=None, + # vals=vals.Anything(), # should be a tuple validator + # label='Calibration point |1>', + # parameter_class=ManualParameter) + + def add_mw_parameters(self): + # Mixer skewness correction + self.add_parameter('mw_G_mixer_phi', unit='deg', + label='Mixer skewness phi Gaussian quadrature', + parameter_class=ManualParameter, initial_value=0) + self.add_parameter('mw_G_mixer_alpha', unit='', + label='Mixer skewness alpha Gaussian quadrature', + parameter_class=ManualParameter, initial_value=1) + self.add_parameter('mw_D_mixer_phi', unit='deg', + label='Mixer skewness phi Derivative quadrature', + parameter_class=ManualParameter, initial_value=0) + self.add_parameter('mw_D_mixer_alpha', unit='', + label='Mixer skewness alpha Derivative quadrature', + parameter_class=ManualParameter, initial_value=1) + + # Mixer offsets correction, qubit drive + self.add_parameter('mw_mixer_offs_GI',unit='V', + parameter_class=ManualParameter, initial_value=0) + self.add_parameter('mw_mixer_offs_GQ', unit='V', + 
parameter_class=ManualParameter, initial_value=0) + self.add_parameter('mw_mixer_offs_DI', + unit='V', + parameter_class=ManualParameter, initial_value=0) + self.add_parameter('mw_mixer_offs_DQ', unit='V', + parameter_class=ManualParameter, initial_value=0) + + self.add_parameter('mw_pow_td_source', + label='Time-domain power', + unit='dBm', + initial_value=20, + parameter_class=ManualParameter) + + self.add_parameter('mw_freq_mod', + initial_value=-100e6, + label='pulse-modulation frequency', unit='Hz', + parameter_class=ManualParameter) + + self.add_parameter('mw_amp180', + label='Pi-pulse amplitude', unit='V', + initial_value=.8, + parameter_class=ManualParameter) + self.add_parameter('mw_amp90_scale', + label='pulse amplitude scaling factor', + unit='', + initial_value=.5, + vals=vals.Numbers(min_value=0, max_value=1.0), + parameter_class=ManualParameter) + + self.add_parameter('mw_channel_amp', + label='AWG channel amplitude. WARNING: Check your hardware specific limits!', + unit='', + initial_value=.5, + vals=vals.Numbers(min_value=0, max_value=1.6), + parameter_class=ManualParameter) + + self.add_parameter('mw_channel_range', + label='AWG channel range. 
WARNING: Check your hardware specific limits!', + unit='V', + initial_value=.8, + vals=vals.Enum(0.2, 0.4, 0.6, 0.8, 1, 2, 3, 4, 5), + parameter_class=ManualParameter) + + self.add_parameter('mw_ef_amp', + label='Pi-pulse amplitude ef-transition', unit='V', + initial_value=.4, + parameter_class=ManualParameter) + self.add_parameter('mw_fh_amp', + label='Pi-pulse amplitude fh-transition', unit='V', + initial_value=.4, + parameter_class=ManualParameter) + + self.add_parameter('mw_awg_ch', parameter_class=ManualParameter, + initial_value=1, + vals=vals.Ints()) + self.add_parameter('mw_gauss_width', unit='s', + initial_value=10e-9, + parameter_class=ManualParameter) + self.add_parameter('mw_motzoi', label='Motzoi parameter', unit='', + initial_value=0, + parameter_class=ManualParameter) + self.add_parameter('mw_vsm_marker_source', + label='VSM switch state', + initial_value='int', + vals=vals.Enum('ext', 'int'), + parameter_class=ManualParameter) + + self._mw_vsm_delay = 0 + self.add_parameter( + 'mw_vsm_delay', label='CCL VSM trigger delay', + vals=vals.Ints(0, 127), unit='samples', + docstring=('This value needs to be calibrated to ensure that ' + 'the VSM mask aligns with the microwave pulses. ' + 'Calibration is done using' + ' self.calibrate_mw_vsm_delay.'), + set_cmd=self._set_mw_vsm_delay, + get_cmd=self._get_mw_vsm_delay) + + self._mw_fine_delay = 0 + self.add_parameter('mw_fine_delay', label='fine delay of the AWG channel', + unit='s', + docstring='This parameters serves for fine tuning of ' + 'the RO, MW and flux pulses. It should be kept ' + 'positive and below 20e-9. Any larger adjustments' + 'should be done by changing CCL dio delay' + 'through device object.', + set_cmd=self._set_mw_fine_delay, + get_cmd=self._get_mw_fine_delay) + + self._flux_fine_delay = 0 + self.add_parameter('flux_fine_delay', label='fine delay of the AWG channel', + unit='s', + docstring='This parameters serves for fine tuning of ' + 'the RO, MW and flux pulses. 
It should be kept ' + 'positive and below 20e-9. Any larger adjustments' + 'should be done by changing CCL dio delay' + 'through device object.', + set_cmd=self._set_flux_fine_delay, + get_cmd=self._get_flux_fine_delay) + + self.add_parameter('mw_vsm_ch_in', + label='VSM input channel Gaussian component', + vals=vals.Ints(1, 4), + initial_value=1, + parameter_class=ManualParameter) + self.add_parameter('mw_vsm_mod_out', + label='VSM output module for microwave pulses', + docstring=('Selects the VSM output module for MW' + ' pulses. N.B. for spec the ' + 'spec_vsm_ch_out parameter is used.'), + vals=vals.Ints(1, 8), + initial_value=1, + parameter_class=ManualParameter) + + self.add_parameter('mw_vsm_G_amp', + label='VSM amp Gaussian component', + vals=vals.Numbers(0.1, 1.0), + initial_value=1.0, + parameter_class=ManualParameter) + self.add_parameter('mw_vsm_D_amp', + label='VSM amp Derivative component', + vals=vals.Numbers(0.1, 1.0), + initial_value=1.0, + parameter_class=ManualParameter) + self.add_parameter('mw_vsm_G_phase', + vals=vals.Numbers(-125, 45), + initial_value=0, unit='deg', + parameter_class=ManualParameter) + self.add_parameter('mw_vsm_D_phase', + vals=vals.Numbers(-125, 45), + initial_value=0, unit='deg', + parameter_class=ManualParameter) + + # LRU pulse parameters + self.add_parameter('LRU_channel_amp', + label='LRU AWG channel amplitude. WARNING: Check your hardware specific limits!', + unit='', + initial_value=.5, + vals=vals.Numbers(min_value=0, max_value=1.6), + parameter_class=ManualParameter) + self.add_parameter('LRU_channel_range', + label='LRU AWG channel range. 
WARNING: Check your hardware specific limits!', + unit='V', + initial_value=.8, + vals=vals.Enum(0.2, 0.4, 0.6, 0.8, 1, 2, 3, 4, 5), + parameter_class=ManualParameter) + self.add_parameter('LRU_freq', + label='LRU pulse frequency', unit='Hz', + parameter_class=ManualParameter) + self.add_parameter('LRU_freq_mod', + label='LRU pulse-modulation frequency', unit='Hz', + parameter_class=ManualParameter) + self.add_parameter('LRU_amplitude', + label='LRU pulse-amplitude', + parameter_class=ManualParameter) + self.add_parameter('LRU_duration', + initial_value=300e-9, + label='LRU total pulse duration', unit='s', + parameter_class=ManualParameter) + self.add_parameter('LRU_duration_rise', + initial_value=30e-9, + label='LRU pulse rise duration', unit='s', + parameter_class=ManualParameter) + self.add_parameter('lru_mixer_offs_GI',unit='V', + parameter_class=ManualParameter, initial_value=0) + self.add_parameter('lru_mixer_offs_GQ', unit='V', + parameter_class=ManualParameter, initial_value=0) + + def _using_QWG(self): + """ + Checks if a QWG is used for microwave control. + """ + AWG = self.instr_LutMan_MW.get_instr().AWG.get_instr() + return isinstance(AWG, QuTech_AWG_Module) + + def _set_mw_vsm_delay(self, val): + # sort of a pseudo Manual Parameter + self.instr_CC.get_instr().set( + 'vsm_channel_delay{}'.format(self.cfg_qubit_nr()), val) + self._mw_vsm_delay = val + + def _get_mw_vsm_delay(self): + return self._mw_vsm_delay + + def _set_mw_fine_delay(self, val): + if self.cfg_with_vsm(): + log.warning('CCL transmon is using VSM. Use mw_vsm_delay to' + 'adjust delay') + else: + lutman = self.find_instrument(self.instr_LutMan_MW()) + AWG = lutman.find_instrument(lutman.AWG()) + if self._using_QWG(): + log.warning( + 'CCL transmon is using QWG. 
mw_fine_delay not supported.') + else: + AWG.set('sigouts_{}_delay'.format(lutman.channel_I()-1), val) + AWG.set('sigouts_{}_delay'.format(lutman.channel_Q()-1), val) + self._mw_fine_delay = val + + def _get_mw_fine_delay(self): + return self._mw_fine_delay + + def _set_flux_fine_delay(self, val): + if self.instr_LutMan_Flux() is not None: + lutman = self.find_instrument(self.instr_LutMan_Flux()) + AWG = lutman.find_instrument(lutman.AWG()) + if self._using_QWG(): + log.warning('CCL transmon is using QWG. Not implemented.') + else: + AWG.set('sigouts_{}_delay'.format( + lutman.cfg_awg_channel()-1), val) + # val = AWG.get('sigouts_{}_delay'.format(lutman.cfg_awg_channel()-1)) + else: + log.warning( + 'No Flux LutMan specified, could not set flux timing fine') + self._flux_fine_delay = val + + def _get_flux_fine_delay(self): + return self._flux_fine_delay + + def add_spec_parameters(self): + self.add_parameter('spec_vsm_amp', + label='VSM amplitude for spec pulses', + vals=vals.Numbers(0.1, 1.0), + initial_value=1.0, + parameter_class=ManualParameter) + + self.add_parameter('spec_vsm_mod_out', + label='VSM output module for spectroscopy pulses', + docstring=('Selects the VSM output channel for spec' + ' pulses. N.B. 
for mw pulses the ' + 'spec_mw_ch_out parameter is used.'), + vals=vals.Ints(1, 8), + initial_value=1, + parameter_class=ManualParameter) + + self.add_parameter('spec_vsm_ch_in', + label='VSM input channel for spec pulses', + docstring=('VSM input channel for spec pulses' + ' generally this should be the same as ' + ' the mw_vsm_ch_Gin parameter.'), + vals=vals.Ints(1, 4), + initial_value=1, + parameter_class=ManualParameter) + + self.add_parameter('spec_pulse_length', + label='Pulsed spec pulse duration', + unit='s', vals=vals.Numbers(0e-9, 20e-6), + # FIXME validator: should be multiple of 20e-9 + initial_value=500e-9, + parameter_class=ManualParameter) + + self.add_parameter( + 'spec_type', parameter_class=ManualParameter, docstring=( + 'determines what kind of spectroscopy to do, \n' + '"CW": opens the relevant VSM channel to always let the tone ' + 'through. \n' + '"vsm_gated": uses the VSM in external mode to gate the spec ' + 'source. \n ' + '"IQ" uses the TD source and AWG8 to generate a spec pulse'), + initial_value='CW', + vals=vals.Enum('CW', 'IQ', 'vsm_gated')) + + self.add_parameter( + 'spec_amp', unit='V', docstring=( + 'Amplitude of the spectroscopy pulse in the mw LutMan. 
' + 'The power of the spec pulse should be controlled through ' + 'the vsm amplitude "spec_vsm_amp"'), + vals=vals.Numbers(0, 1), parameter_class=ManualParameter, + initial_value=0.8) + self.add_parameter( + 'spec_pow', unit='dB', + vals=vals.Numbers(-70, 20), + parameter_class=ManualParameter, + initial_value=-30) + self.add_parameter( + 'spec_wait_time', unit='s', + vals=vals.Numbers(0, 100e-6), + parameter_class=ManualParameter, + initial_value=0) + + def add_flux_parameters(self): + # fl_dc_ is the prefix for DC flux bias related params + # FIXME: + self.add_parameter( + 'fl_dc_polycoeff', + docstring='Polynomial coefficients for current to frequency conversion', + vals=vals.Arrays(), + # initial value is chosen to not raise errors + initial_value=np.array([0, 0, -1e12, 0, 6e9]), + parameter_class=ManualParameter) + + self.add_parameter( + 'fl_ac_polycoeff', + docstring='Polynomial coefficients for current to frequency conversion', + vals=vals.Arrays(), + # initial value is chosen to not raise errors + initial_value=np.array([0, 0, -1e12, 0, 6e9]), + parameter_class=ManualParameter) + + self.add_parameter( + 'fl_dc_I_per_phi0', label='Flux bias I/Phi0', + docstring='Conversion factor for flux bias, current per flux quantum', + vals=vals.Numbers(), unit='A', initial_value=10e-3, + parameter_class=ManualParameter) + self.add_parameter( + 'fl_dc_I', label='Flux bias', unit='A', + docstring='Current flux bias setting', vals=vals.Numbers(), + initial_value=0, parameter_class=ManualParameter) + self.add_parameter( + 'fl_dc_I0', unit='A', label='Flux bias sweet spot', docstring=( + 'Flux bias offset corresponding to the sweetspot'), + vals=vals.Numbers(), initial_value=0, + parameter_class=ManualParameter) + # ? not used anywhere + self.add_parameter( + 'fl_dc_ch', label='Flux bias channel', + docstring=('Used to determine the DAC channel used for DC ' + 'flux biasing. 
Should be an int when using an IVVI rack' + 'or a str (channel name) when using an SPI rack.'), + vals=vals.Strings(), initial_value=None, + parameter_class=ManualParameter) + + # Currently this has only the parameters for 1 CZ gate. + # in the future there will be 5 distinct flux operations for which + # parameters have to be stored. + # cz to all nearest neighbours (of which 2 are only phase corr) and + # the "park" operation. + self.add_parameter('fl_cz_length', vals=vals.Numbers(), + unit='s', initial_value=35e-9, + parameter_class=ManualParameter) + self.add_parameter('fl_cz_lambda_2', vals=vals.Numbers(), + initial_value=0, + parameter_class=ManualParameter) + self.add_parameter('fl_cz_lambda_3', vals=vals.Numbers(), + initial_value=0, + parameter_class=ManualParameter) + self.add_parameter('fl_cz_theta_f', vals=vals.Numbers(), + unit='deg', + initial_value=80, + parameter_class=ManualParameter) + self.add_parameter('fl_cz_V_per_phi0', vals=vals.Numbers(), + unit='V', initial_value=1, + parameter_class=ManualParameter) + self.add_parameter('fl_cz_freq_01_max', vals=vals.Numbers(), + unit='Hz', parameter_class=ManualParameter) + self.add_parameter('fl_cz_J2', vals=vals.Numbers(), + unit='Hz', + initial_value=50e6, + parameter_class=ManualParameter) + self.add_parameter('fl_cz_freq_interaction', vals=vals.Numbers(), + unit='Hz', + parameter_class=ManualParameter) + self.add_parameter('fl_cz_phase_corr_length', + unit='s', + initial_value=5e-9, vals=vals.Numbers(), + parameter_class=ManualParameter) + self.add_parameter('fl_cz_phase_corr_amp', + unit='V', + initial_value=0, vals=vals.Numbers(), + parameter_class=ManualParameter) + + def add_config_parameters(self): + self.add_parameter( + 'cfg_trigger_period', label='Trigger period', + docstring=('Time between experiments, used to initialize all' + ' qubits in the ground state'), + unit='s', initial_value=200e-6, + parameter_class=ManualParameter, + vals=vals.Numbers(min_value=1e-6, max_value=327668e-9)) + 
self.add_parameter('cfg_openql_platform_fn', + label='OpenQL platform configuration filename', + parameter_class=ManualParameter, + vals=vals.Strings()) + self.add_parameter( + 'cfg_qubit_nr', label='Qubit number', vals=vals.Ints(0, 20), + parameter_class=ManualParameter, initial_value=0, + docstring='The qubit number is used in the OpenQL compiler. ') + + self.add_parameter('cfg_qubit_freq_calc_method', + initial_value='latest', + parameter_class=ManualParameter, + vals=vals.Enum('latest', 'flux')) + self.add_parameter('cfg_rb_calibrate_method', + initial_value='restless', + parameter_class=ManualParameter, + vals=vals.Enum('restless', 'ORBIT')) + + self.add_parameter('cfg_cycle_time', + initial_value=20e-9, + unit='s', + parameter_class=ManualParameter, + # this is to effictively hardcode the cycle time + vals=vals.Enum(20e-9)) + # TODO: add docstring (Oct 2017) + self.add_parameter('cfg_prepare_ro_awg', vals=vals.Bool(), + docstring=('If False disables uploading pusles ' + 'to UHFQC'), + initial_value=True, + parameter_class=ManualParameter) + + self.add_parameter('cfg_prepare_mw_awg', vals=vals.Bool(), + docstring=('If False disables uploading pusles ' + 'to AWG8'), + initial_value=True, + parameter_class=ManualParameter) + self.add_parameter('cfg_with_vsm', vals=vals.Bool(), + docstring=('to avoid using the VSM if set to False' + ' bypasses all commands to vsm if set False'), + initial_value=True, + parameter_class=ManualParameter) + + self.add_parameter('cfg_spec_mode', vals=vals.Bool(), + docstring=( + 'Used to activate spec mode in measurements'), + initial_value=False, + parameter_class=ManualParameter) + + def add_generic_qubit_parameters(self): + self.add_parameter('E_c', unit='Hz', + initial_value=300e6, + parameter_class=ManualParameter, + vals=vals.Numbers()) + self.add_parameter('E_j', unit='Hz', + parameter_class=ManualParameter, + vals=vals.Numbers()) + self.add_parameter('T1', unit='s', + parameter_class=ManualParameter, + vals=vals.Numbers(0, 
200e-6)) + self.add_parameter('T2_echo', unit='s', + parameter_class=ManualParameter, + vals=vals.Numbers(0, 200e-6)) + self.add_parameter('T2_star', unit='s', + parameter_class=ManualParameter, + vals=vals.Numbers(0, 200e-6)) + + self.add_parameter('freq_qubit', + label='Qubit frequency', unit='Hz', + parameter_class=ManualParameter) + self.add_parameter('freq_max', + label='qubit sweet spot frequency', unit='Hz', + parameter_class=ManualParameter) + self.add_parameter('freq_res', + label='Resonator frequency', unit='Hz', + parameter_class=ManualParameter) + self.add_parameter('asymmetry', unit='', + docstring='Asymmetry parameter of the SQUID loop', + initial_value=0, + parameter_class=ManualParameter) + self.add_parameter('anharmonicity', unit='Hz', + label='Anharmonicity', + docstring='Anharmonicity, negative by convention', + parameter_class=ManualParameter, + # typical target value + initial_value=-300e6, + vals=vals.Numbers()) + self.add_parameter('anharmonicity_3state', unit='Hz', + label='Anharmonicity_3state', + docstring='Anharmonicity of 3rd state (f23-f01),'+\ + ' negative by convention', + parameter_class=ManualParameter, + # typical target value + initial_value=-300e6, + vals=vals.Numbers()) + self.add_parameter('dispersive_shift', + label='Resonator dispersive shift', unit='Hz', + parameter_class=ManualParameter, + vals=vals.Numbers()) + self.add_parameter('F_RB', + initial_value=0, + label='RB single qubit Clifford fidelity', + vals=vals.Numbers(0, 1.0), + parameter_class=ManualParameter) + + def prepare_for_continuous_wave(self): + if 'optimal' in self.ro_acq_weight_type(): + log.warning('Changing ro_acq_weight_type to SSB.') + self.ro_acq_weight_type('SSB') + if self.ro_acq_weight_type() not in {'DSB', 'SSB'}: + # this is because the CW acquisition detects using angle and phase + # and this requires two channels to rotate the signal properly. 
+ raise ValueError('Readout "{}" '.format(self.ro_acq_weight_type()) + + 'weight type must be "SSB" or "DSB"') + + if self.cfg_with_vsm(): + self._prep_cw_configure_VSM() + + self.prepare_readout(CW=True) + self._prep_cw_spec() + # source is turned on in measure spec when needed + self.instr_LO_mw.get_instr().off() + if self.instr_spec_source() != None: + self.instr_spec_source.get_instr().off() + if self.instr_spec_source_2() != None: + self.instr_spec_source_2.get_instr().off() + + def _prep_cw_spec(self): + if self.cfg_with_vsm(): + VSM = self.instr_VSM.get_instr() + if self.spec_type() == 'CW': + marker_source = 'int' + else: + marker_source = 'ext' + + if self.instr_spec_source() != None: + self.instr_spec_source.get_instr().power(self.spec_pow()) + + def prepare_readout(self, CW=False): + """ + Configures the readout. Consists of the following steps + - instantiate the relevant detector functions + - set the microwave frequencies and sources + - generate the RO pulse + - set the integration weights + """ + self._prep_ro_instantiate_detectors() + self._prep_ro_sources() + + if self.cfg_prepare_ro_awg(): + self.instr_acquisition.get_instr().load_default_settings( + upload_sequence=False) + self._prep_ro_pulse(CW=CW) + self._prep_ro_integration_weights() + self._prep_deskewing_matrix() + else: + log.warning( + '"cfg_prepare_ro_awg" set to False, not preparing readout .') + + def _prep_deskewing_matrix(self): + UHFQC = self.instr_acquisition.get_instr() + alpha = self.ro_acq_mixer_alpha() + phi = self.ro_acq_mixer_phi() + predistortion_matrix = np.array( + ((1, -alpha * np.sin(phi * 2 * np.pi / 360)), + (0, alpha * np.cos(phi * 2 * np.pi / 360)))) + UHFQC.qas_0_deskew_rows_0_cols_0(predistortion_matrix[0, 0]) + UHFQC.qas_0_deskew_rows_0_cols_1(predistortion_matrix[0, 1]) + UHFQC.qas_0_deskew_rows_1_cols_0(predistortion_matrix[1, 0]) + UHFQC.qas_0_deskew_rows_1_cols_1(predistortion_matrix[1, 1]) + return predistortion_matrix + + def 
_prep_ro_instantiate_detectors(self): + self.instr_MC.get_instr().soft_avg(self.ro_soft_avg()) + if 'optimal' in self.ro_acq_weight_type(): + if self.ro_acq_weight_type() == 'optimal': + ro_channels = [self.ro_acq_weight_chI()] + elif (self.ro_acq_weight_type() == 'optimal IQ') or \ + (self.ro_acq_weight_type() == 'optimal IQ2'): + ro_channels = [ + self.ro_acq_weight_chI(), self.ro_acq_weight_chQ()] + result_logging_mode = 'lin_trans' + + if self.ro_acq_digitized(): + result_logging_mode = 'digitized' + # Update the RO theshold + acq_ch = self.ro_acq_weight_chI() + + # The threshold that is set in the hardware needs to be + # corrected for the offset as this is only applied in + # software. + + if abs(self.ro_acq_threshold()) > 32: + threshold = 32 + log.warning('Clipping {}.ro_acq_threshold {}>32'.format( + self.name, self.ro_acq_threshold())) + # working around the limitation of threshold in UHFQC + # which cannot be >abs(32). + else: + threshold = self.ro_acq_threshold() + + self.instr_acquisition.get_instr().set( + 'qas_0_thresholds_{}_level'.format(acq_ch), threshold) + + else: + ro_channels = [self.ro_acq_weight_chI(), + self.ro_acq_weight_chQ()] + result_logging_mode = 'raw' + + if 'UHFQC' in self.instr_acquisition(): + UHFQC = self.instr_acquisition.get_instr() + + self.input_average_detector = det.UHFQC_input_average_detector( + UHFQC=UHFQC, + AWG=self.instr_CC.get_instr(), + nr_averages=self.ro_acq_averages(), + nr_samples=int(self.ro_acq_input_average_length()*1.8e9)) + + self.int_avg_det = self.get_int_avg_det() + + self.int_avg_det_single = det.UHFQC_integrated_average_detector( + UHFQC=UHFQC, AWG=self.instr_CC.get_instr(), + channels=ro_channels, + result_logging_mode=result_logging_mode, + nr_averages=self.ro_acq_averages(), + real_imag=True, single_int_avg=True, + integration_length=self.ro_acq_integration_length()) + + self.UHFQC_spec_det = det.UHFQC_spectroscopy_detector( + UHFQC=UHFQC, ro_freq_mod=self.ro_freq_mod(), + 
AWG=self.instr_CC.get_instr(), channels=ro_channels, + nr_averages=self.ro_acq_averages(), + integration_length=self.ro_acq_integration_length()) + + self.int_log_det = det.UHFQC_integration_logging_det( + UHFQC=UHFQC, AWG=self.instr_CC.get_instr(), + channels=ro_channels, + result_logging_mode=result_logging_mode, + integration_length=self.ro_acq_integration_length()) + else: + raise NotImplementedError() + + def get_int_avg_det(self, **kw): + """ + Instantiates an integration average detector using parameters from + the qubit object. **kw get passed on to the class when instantiating + the detector function. + """ + + if self.ro_acq_weight_type() == 'optimal': + ro_channels = [self.ro_acq_weight_chI()] + + if self.ro_acq_digitized(): + result_logging_mode = 'digitized' + else: + result_logging_mode = 'lin_trans' + else: + ro_channels = [self.ro_acq_weight_chI(), + self.ro_acq_weight_chQ()] + result_logging_mode = 'raw' + + int_avg_det = det.UHFQC_integrated_average_detector( + UHFQC=self.instr_acquisition.get_instr(), + AWG=self.instr_CC.get_instr(), + channels=ro_channels, + result_logging_mode=result_logging_mode, + nr_averages=self.ro_acq_averages(), + integration_length=self.ro_acq_integration_length(), **kw) + + return int_avg_det + + def _prep_ro_sources(self): + if self.instr_LutMan_RO.get_instr().LO_freq() is not None: + log.info('Warning: This qubit is using a fixed RO LO frequency.') + LO = self.instr_LO_ro.get_instr() + ro_Lutman = self.instr_LutMan_RO.get_instr() + LO_freq = ro_Lutman.LO_freq() + LO.frequency.set(LO_freq) + mod_freq = self.ro_freq() - LO_freq + self.ro_freq_mod(mod_freq) + log.info("Setting modulation freq of {} to {}".format(self.name, mod_freq)) + + else: + LO = self.instr_LO_ro.get_instr() + LO.frequency.set(self.ro_freq() - self.ro_freq_mod()) + + LO.on() + LO.power(self.ro_pow_LO()) + + def _prep_ro_pulse(self, upload=True, CW=False): + """ + Sets the appropriate parameters in the RO LutMan and uploads the + desired wave. 
+ Relevant parameters are: + ro_pulse_type ("up_down_down", "square") + ro_freq_mod + ro_acq_delay + + ro_pulse_length + ro_pulse_amp + ro_pulse_phi + ro_pulse_down_length0 + ro_pulse_down_amp0 + ro_pulse_down_phi0 + ro_pulse_down_length1 + ro_pulse_down_amp1 + ro_pulse_down_phi1 + + + ro_pulse_mixer_alpha + ro_pulse_mixer_phi + + ro_pulse_mixer_offs_I + ro_pulse_mixer_offs_Q + + """ + if CW: + ro_amp = self.ro_pulse_amp_CW() + else: + ro_amp = self.ro_pulse_amp() + + if 'UHFQC' not in self.instr_acquisition(): + raise NotImplementedError() + UHFQC = self.instr_acquisition.get_instr() + + if 'gated' in self.ro_pulse_type().lower(): + UHFQC.awg_sequence_acquisition() + + else: + ro_lm = self.instr_LutMan_RO.get_instr() + ro_lm.AWG(self.instr_acquisition()) + + idx = self.cfg_qubit_nr() + # These parameters affect all resonators + ro_lm.set('resonator_combinations', + [[idx]]) + ro_lm.set('pulse_type', + 'M_' + self.ro_pulse_type()) + ro_lm.set('mixer_alpha', + self.ro_pulse_mixer_alpha()) + ro_lm.set('mixer_phi', + self.ro_pulse_mixer_phi()) + + ro_lm.set('M_modulation_R{}'.format(idx), + self.ro_freq_mod()) + ro_lm.set('M_length_R{}'.format(idx), + self.ro_pulse_length()) + ro_lm.set('M_amp_R{}'.format(idx), + ro_amp) + ro_lm.set('M_delay_R{}'.format(idx), + self.ro_pulse_delay()) + ro_lm.set('M_phi_R{}'.format(idx), + self.ro_pulse_phi()) + ro_lm.set('M_down_length0_R{}'.format(idx), + self.ro_pulse_down_length0()) + ro_lm.set('M_down_amp0_R{}'.format(idx), + self.ro_pulse_down_amp0()) + ro_lm.set('M_down_phi0_R{}'.format(idx), + self.ro_pulse_down_phi0()) + ro_lm.set('M_down_length1_R{}'.format(idx), + self.ro_pulse_down_length1()) + ro_lm.set('M_down_amp1_R{}'.format(idx), + self.ro_pulse_down_amp1()) + ro_lm.set('M_down_phi1_R{}'.format(idx), + self.ro_pulse_down_phi1()) + + ro_lm.acquisition_delay(self.ro_acq_delay()) + if upload: + ro_lm.load_DIO_triggered_sequence_onto_UHFQC() + UHFQC.sigouts_0_offset(self.ro_pulse_mixer_offs_I()) + 
UHFQC.sigouts_1_offset(self.ro_pulse_mixer_offs_Q()) + + if [self.cfg_qubit_nr()] not in ro_lm.resonator_combinations(): + log.warning('Qubit number of {} is not '.format(self.name) + + 'present in resonator_combinations of the readout lutman.') + + def _prep_ro_integration_weights(self): + """ + Sets the ro acquisition integration weights. + The relevant parameters here are + ro_acq_weight_type -> 'SSB', 'DSB' or 'Optimal' + ro_acq_weight_chI -> Specifies which integration weight + (channel) to use + ro_acq_weight_chQ -> The second channel in case of SSB/DSB + RO_acq_weight_func_I -> A custom integration weight (array) + RO_acq_weight_func_Q -> "" + + """ + if 'UHFQC' in self.instr_acquisition(): + UHFQC = self.instr_acquisition.get_instr() + if self.ro_acq_weight_type() == 'SSB': + UHFQC.prepare_SSB_weight_and_rotation( + IF=self.ro_freq_mod(), + weight_chI=self.ro_acq_weight_chI(), + weight_chQ=self.ro_acq_weight_chQ()) + elif self.ro_acq_weight_type() == 'DSB': + UHFQC.prepare_DSB_weight_and_rotation( + IF=self.ro_freq_mod(), + weight_chI=self.ro_acq_weight_chI(), + weight_chQ=self.ro_acq_weight_chQ()) + elif 'optimal' in self.ro_acq_weight_type(): + if (self.ro_acq_weight_func_I() is None or + self.ro_acq_weight_func_Q() is None): + log.warning('Optimal weights are None,' + + ' not setting integration weights') + elif self.ro_acq_rotated_SSB_when_optimal(): + # this allows bypasing the optimal weights for poor SNR qubits + # working around the limitation of threshold in UHFQC + # which cannot be >abs(32) + if self.ro_acq_digitized() and abs(self.ro_acq_threshold()) > 32: + scaling_factor = 32/self.ro_acq_threshold() + else: + scaling_factor = 1 + + UHFQC.prepare_SSB_weight_and_rotation( + IF=self.ro_freq_mod(), + weight_chI=self.ro_acq_weight_chI(), + weight_chQ=None, + rotation_angle=self.ro_acq_rotated_SSB_rotation_angle(), + length=self.ro_acq_integration_length_weigth_function(), + scaling_factor=scaling_factor) + else: + # When optimal weights are used, 
only the RO I weight + # channel is used + + # FIXME!: Dirty hack because of qusurf issue #63, adds + # delay samples in the optimized weights + opt_WI = self.ro_acq_weight_func_I() + opt_WQ = self.ro_acq_weight_func_Q() + del_sampl = self.ro_acq_weight_func_delay_samples_hack() + if del_sampl > 0: + zeros = np.zeros(abs(del_sampl)) + opt_WI = np.concatenate( + [opt_WI[abs(del_sampl):], zeros]) + opt_WQ = np.concatenate( + [opt_WQ[abs(del_sampl):], zeros]) + elif del_sampl < 0: + zeros = np.zeros(abs(del_sampl)) + opt_WI = np.concatenate( + [zeros, opt_WI[:-abs(del_sampl)]]) + opt_WQ = np.concatenate( + [zeros, opt_WQ[:-abs(del_sampl)]]) + else: + pass + UHFQC.set('qas_0_integration_weights_{}_real'.format( + self.ro_acq_weight_chI()), opt_WI) + UHFQC.set('qas_0_integration_weights_{}_imag'.format( + self.ro_acq_weight_chI()), opt_WQ) + UHFQC.set('qas_0_rotations_{}'.format( + self.ro_acq_weight_chI()), 1.0 - 1.0j) + if self.ro_acq_weight_type() == 'optimal IQ': + print('setting the optimal Q') + UHFQC.set('qas_0_integration_weights_{}_real'.format( + self.ro_acq_weight_chQ()), opt_WQ) + UHFQC.set('qas_0_integration_weights_{}_imag'.format( + self.ro_acq_weight_chQ()), opt_WI) + UHFQC.set('qas_0_rotations_{}'.format( + self.ro_acq_weight_chQ()), 1.0 + 1.0j) + elif self.ro_acq_weight_type() == 'optimal IQ2': + print('setting the optimal Q') + opt_WI2 = self.ro_acq_weight_func_I_2() + opt_WQ2 = self.ro_acq_weight_func_Q_2() + UHFQC.set('qas_0_integration_weights_{}_real'.format( + self.ro_acq_weight_chQ()), opt_WI2) + UHFQC.set('qas_0_integration_weights_{}_imag'.format( + self.ro_acq_weight_chQ()), opt_WQ2) + UHFQC.set('qas_0_rotations_{}'.format( + self.ro_acq_weight_chQ()), 1.0 - 1.0j) + + else: + raise NotImplementedError( + 'CBox, DDM or other are currently not supported') + + def prepare_for_timedomain(self): + self.prepare_readout() + self._prep_td_sources() + self._prep_mw_pulses() + if self.instr_LutMan_LRU(): + self._prep_LRU_pulses() + if 
self.cfg_with_vsm(): + self._prep_td_configure_VSM() + + def _prep_td_sources(self): + # if self.instr_spec_source() is not None: + # self.instr_spec_source.get_instr().off() + # self.instr_LO_mw.get_instr().on() + # self.instr_LO_mw.get_instr().pulsemod_state(False) + # # Set source to fs =f-f_mod such that pulses appear at f = fs+f_mod + # self.instr_LO_mw.get_instr().frequency.set( + # self.freq_qubit.get() - self.mw_freq_mod.get()) + + # self.instr_LO_mw.get_instr().power.set(self.mw_pow_td_source.get()) + + MW_LutMan = self.instr_LutMan_MW.get_instr() + + if self.instr_spec_source() is not None: + self.instr_spec_source.get_instr().off() + self.instr_LO_mw.get_instr().on() + self.instr_LO_mw.get_instr().pulsemod_state(False) + + if MW_LutMan.cfg_sideband_mode() == 'static': + # Set source to fs =f-f_mod such that pulses appear at f = fs+f_mod + self.instr_LO_mw.get_instr().frequency.set( + self.freq_qubit.get() - self.mw_freq_mod.get()) + elif MW_LutMan.cfg_sideband_mode() == 'real-time': + # For historic reasons, will maintain the change qubit frequency here in + # _prep_td_sources, even for real-time mode, where it is only changed in the HDAWG + if ((MW_LutMan.channel_I()-1)//2 != (MW_LutMan.channel_Q()-1)//2): + raise KeyError('In real-time sideband mode, channel I/Q should share same awg group.') + self.mw_freq_mod(self.freq_qubit.get() - self.instr_LO_mw.get_instr().frequency.get()) + MW_LutMan.AWG.get_instr().set('oscs_{}_freq'.format((MW_LutMan.channel_I()-1)//2), + self.mw_freq_mod.get()) + else: + raise ValueError('Unexpected value for parameter cfg_sideband_mode.') + + self.instr_LO_mw.get_instr().power.set(self.mw_pow_td_source.get()) + ################################### + # Prepare LRU source + ################################### + if self.instr_LutMan_LRU(): + LRU_lutman = self.instr_LutMan_LRU.get_instr() + self.instr_LO_LRU.get_instr().on() + # self.instr_LO_LRU.get_instr().pulsemod_state(False) + # If the lutman type is MW + if 
isinstance(LRU_lutman, + pycqed.instrument_drivers.meta_instrument.LutMans.mw_lutman.LRU_MW_LutMan): + if LRU_lutman.cfg_sideband_mode() == 'static': + # Set source to fs =f-f_mod such that pulses appear at f = fs+f_mod + self.instr_LO_LRU.get_instr().frequency.set( + self.LRU_freq.get() - self.LRU_freq_mod.get()) + LRU_lutman.mw_lru_modulation(self.LRU_freq_mod.get()) + elif LRU_lutman.cfg_sideband_mode() == 'real-time': + # For historic reasons, will maintain the change qubit frequency here in + # _prep_td_sources, even for real-time mode, where it is only changed in the HDAWG + if ((LRU_lutman.channel_I()-1)//2 != (LRU_lutman.channel_Q()-1)//2): + raise KeyError('In real-time sideband mode, channel I/Q should share same awg group.') + self.LRU_freq_mod(self.LRU_freq.get() - self.instr_LO_LRU.get_instr().frequency.get()) + LRU_lutman.AWG.get_instr().set('oscs_{}_freq'.format((LRU_lutman.channel_I()-1)//2), + self.LRU_freq_mod.get()) + LRU_lutman.mw_lru_modulation(self.LRU_freq_mod.get()) + else: + raise ValueError('Unexpected value for parameter LRU cfg_sideband_mode.') + # If the lutman type is FLUX + elif isinstance(LRU_lutman, + pycqed.instrument_drivers.meta_instrument.LutMans.flux_lutman_vcz.LRU_Flux_LutMan): + # Set modulation frequency and keep LO frequency + _mw_mod = self.LRU_freq.get() - self.instr_LO_LRU.get_instr().frequency.get() + if _mw_mod < 0: + raise ValueError('Modulation frequency for flux LRU LutMan \ + cannot be negative. Change LRU LO frequency.') + else: + self.LRU_freq_mod(_mw_mod) + # If qubit does not have Lutman but has LRU LO. + # (This is meant to handle cases where the LRU pulse is generated + # from the AWG of another qubit. In these cases we want to update + # the frequency of the LO based on the modulation frequency). + elif self.instr_LO_LRU(): + # Since these qubits are using the pulse from another qubit + # AWG, the modulation frequency of their LRU pulse is inherited. 
+ # This makes the preparation of their LO kinda of tricky. To + # solve it, we hardcode their modulation frequencies here. + self.find_instrument('D1').LRU_freq_mod(self.find_instrument('D4').LRU_freq_mod()) + self.find_instrument('D2').LRU_freq_mod(self.find_instrument('D6').LRU_freq_mod()) + self.find_instrument('D3').LRU_freq_mod(self.find_instrument('D4').LRU_freq_mod()) + self.find_instrument('D7').LRU_freq_mod(self.find_instrument('D6').LRU_freq_mod()) + self.find_instrument('D8').LRU_freq_mod(self.find_instrument('D5').LRU_freq_mod()) + self.find_instrument('D9').LRU_freq_mod(self.find_instrument('D5').LRU_freq_mod()) + self.find_instrument('X1').LRU_freq_mod(self.find_instrument('X4').LRU_freq_mod()) + self.find_instrument('X2').LRU_freq_mod(self.find_instrument('Z1').LRU_freq_mod()) + # Prepare LO source frequency + LO_source = self.instr_LO_LRU.get_instr() + LO_source.frequency(self.LRU_freq()-self.LRU_freq_mod()) + LO_source.on() + LO_source.pulsemod_state(False) + + def _prep_mw_pulses(self): + # 1. Gets instruments and prepares cases + MW_LutMan = self.instr_LutMan_MW.get_instr() + AWG = MW_LutMan.AWG.get_instr() + + # 2. 
Prepares map and parameters for waveforms + # (except pi-pulse amp, which depends on VSM usage) + MW_LutMan.mw_amp90_scale(self.mw_amp90_scale()) + MW_LutMan.mw_gauss_width(self.mw_gauss_width()) + MW_LutMan.channel_amp(self.mw_channel_amp()) + MW_LutMan.channel_range(self.mw_channel_range()) + MW_LutMan.mw_motzoi(self.mw_motzoi()) + MW_LutMan.mw_modulation(self.mw_freq_mod()) + MW_LutMan.spec_amp(self.spec_amp()) + + # used for ef pulsing + MW_LutMan.mw_ef_amp180(self.mw_ef_amp()) + MW_LutMan.mw_fh_amp180(self.mw_fh_amp()) + # MW_LutMan.mw_ef_modulation(MW_LutMan.mw_modulation() + + # self.anharmonicity()) + if MW_LutMan.cfg_sideband_mode() != 'real-time': + MW_LutMan.mw_ef_modulation(MW_LutMan.mw_modulation() + + self.anharmonicity()) + MW_LutMan.mw_fh_modulation(MW_LutMan.mw_modulation() + + self.anharmonicity_3state()) + else: + MW_LutMan.mw_ef_modulation(self.anharmonicity()) + MW_LutMan.mw_fh_modulation(self.anharmonicity_3state()) + + # 3. Does case-dependent things: + # mixers offset+skewness + # pi-pulse amplitude + if self.cfg_with_vsm(): + # case with VSM (both QWG and AWG8) + MW_LutMan.mw_amp180(self.mw_amp180()) + MW_LutMan.G_mixer_phi(self.mw_G_mixer_phi()) + MW_LutMan.G_mixer_alpha(self.mw_G_mixer_alpha()) + MW_LutMan.D_mixer_phi(self.mw_D_mixer_phi()) + MW_LutMan.D_mixer_alpha(self.mw_D_mixer_alpha()) + + MW_LutMan.channel_GI(0+self.mw_awg_ch()) + MW_LutMan.channel_GQ(1+self.mw_awg_ch()) + MW_LutMan.channel_DI(2+self.mw_awg_ch()) + MW_LutMan.channel_DQ(3+self.mw_awg_ch()) + + if self._using_QWG(): + # N.B. This part is QWG specific + if hasattr(MW_LutMan, 'channel_GI'): + # 4-channels are used for VSM based AWG's. + AWG.ch1_offset(self.mw_mixer_offs_GI()) + AWG.ch2_offset(self.mw_mixer_offs_GQ()) + AWG.ch3_offset(self.mw_mixer_offs_DI()) + AWG.ch4_offset(self.mw_mixer_offs_DQ()) + else: # using_AWG8 + # N.B. 
This part is AWG8 specific + AWG.set('sigouts_{}_offset'.format(self.mw_awg_ch()-1), + self.mw_mixer_offs_GI()) + AWG.set('sigouts_{}_offset'.format(self.mw_awg_ch()+0), + self.mw_mixer_offs_GQ()) + AWG.set('sigouts_{}_offset'.format(self.mw_awg_ch()+1), + self.mw_mixer_offs_DI()) + AWG.set('sigouts_{}_offset'.format(self.mw_awg_ch()+2), + self.mw_mixer_offs_DQ()) + else: + if self._using_QWG(): + # case without VSM and with QWG + if ((self.mw_G_mixer_phi() != self.mw_D_mixer_phi()) + or (self.mw_G_mixer_alpha() != self.mw_D_mixer_alpha())): + log.warning('CCL_Transmon {}; _prep_mw_pulses: ' + 'no VSM detected, using mixer parameters' + ' from gaussian channel.'.format(self.name)) + MW_LutMan.mixer_phi(self.mw_G_mixer_phi()) + MW_LutMan.mixer_alpha(self.mw_G_mixer_alpha()) + AWG.set('ch{}_offset'.format(MW_LutMan.channel_I()), + self.mw_mixer_offs_GI()) + AWG.set('ch{}_offset'.format(MW_LutMan.channel_Q()), + self.mw_mixer_offs_GQ()) + else: + # case without VSM (and AWG8) + MW_LutMan.mw_amp180(self.mw_amp180()) + MW_LutMan.mixer_phi(self.mw_G_mixer_phi()) + MW_LutMan.mixer_alpha(self.mw_G_mixer_alpha()) + + # N.B. This part is AWG8 specific + AWG.set('sigouts_{}_offset'.format(self.mw_awg_ch()-1), + self.mw_mixer_offs_GI()) + AWG.set('sigouts_{}_offset'.format(self.mw_awg_ch()+0), + self.mw_mixer_offs_GQ()) + + # 4. reloads the waveforms + if self.cfg_prepare_mw_awg(): + MW_LutMan.load_waveforms_onto_AWG_lookuptable() + else: + log.warning('"cfg_prepare_mw_awg" set to False, ' + 'not preparing microwave pulses.') + + # 5. upload commandtable for virtual-phase gates + MW_LutMan.upload_single_qubit_phase_corrections() + + def _prep_LRU_pulses(self): + ''' + Prepare LRU pulses from LRU lutman. + ''' + # 1. 
Gets instruments + LRU_LutMan = self.instr_LutMan_LRU.get_instr() + AWG = LRU_LutMan.AWG.get_instr() + lutman_type = str(type(LRU_LutMan)) + # Check types of Lutman: + # Doing stuff like this is probably a terrible idea, + # however, you can do this by using the isinstance command + # since you'll just get False always + mw_type = str(pycqed.instrument_drivers.meta_instrument.LutMans.mw_lutman.LRU_MW_LutMan) + fl_type = str(pycqed.instrument_drivers.meta_instrument.LutMans.flux_lutman_vcz.LRU_Flux_LutMan) + # If the lutman type is MW + if lutman_type == mw_type: + # 2. Prepares map and parameters for waveforms + LRU_LutMan.channel_amp(self.LRU_channel_amp()) + LRU_LutMan.channel_range(self.LRU_channel_range()) + LRU_LutMan.mw_lru_modulation(self.LRU_freq_mod()) + LRU_LutMan.mixer_phi(0) + LRU_LutMan.mixer_alpha(1) + + LRU_LutMan.mw_lru_amplitude(self.LRU_amplitude()) + LRU_LutMan.mw_lru_duration(self.LRU_duration()) + LRU_LutMan.mw_lru_rise_duration(self.LRU_duration_rise()) + # Set all waveforms to be LRU pulse + # to ensure same waveform duration. + _lm = { 0: {'name': 'I', 'type': 'lru_idle'}, + 1: {'name': 'lru', 'type': 'lru'}} + LRU_LutMan.LutMap(_lm) + # TO DO: implement mixer corrections on LRU lutman + # N.B. This part is AWG8 specific + AWG.set('sigouts_{}_offset'.format(LRU_LutMan.channel_I()-1), + self.lru_mixer_offs_GI()) + AWG.set('sigouts_{}_offset'.format(LRU_LutMan.channel_I()+0), + self.lru_mixer_offs_GQ()) + # 4. reloads the waveforms + LRU_LutMan.load_waveforms_onto_AWG_lookuptable() + # 5. upload commandtable for virtual-phase gates + LRU_LutMan.upload_single_qubit_phase_corrections() + # If the lutman type is FLUX + elif lutman_type == fl_type: + # 2. 
Prepares map and parameters for waveforms + LRU_LutMan.cfg_awg_channel_amplitude(self.LRU_channel_amp()) + # LRU_LutMan.cfg_awg_channel_range(self.LRU_channel_range()) + LRU_LutMan.mw_lru_amplitude(self.LRU_amplitude()) + LRU_LutMan.mw_lru_duration(self.LRU_duration()) + LRU_LutMan.mw_lru_rise_duration(self.LRU_duration_rise()) + LRU_LutMan.mw_lru_modulation(self.LRU_freq_mod()) + # 3. upload commandtable for virtual-phase gates + LRU_LutMan.load_waveforms_onto_AWG_lookuptable() + + def _prep_td_configure_VSM(self): + # Configure VSM + VSM = self.instr_VSM.get_instr() + VSM.set('ch{}_frequency'.format( + self.mw_vsm_ch_in()), self.freq_qubit()) + for mod in range(1, 9): + VSM.set('mod{}_ch{}_marker_state'.format( + mod, self.spec_vsm_ch_in()), 'off') + VSM.set('mod{}_ch{}_marker_state'.format( + self.mw_vsm_mod_out(), self.mw_vsm_ch_in()), 'on') + VSM.set('mod{}_marker_source'.format( + self.mw_vsm_mod_out()), self.mw_vsm_marker_source()) + VSM.set('mod{}_ch{}_derivative_amp'.format( + self.mw_vsm_mod_out(), self.mw_vsm_ch_in()), self.mw_vsm_D_amp()) + VSM.set('mod{}_ch{}_derivative_phase'.format( + self.mw_vsm_mod_out(), self.mw_vsm_ch_in()), self.mw_vsm_D_phase()) + VSM.set('mod{}_ch{}_gaussian_amp'.format( + self.mw_vsm_mod_out(), self.mw_vsm_ch_in()), self.mw_vsm_G_amp()) + VSM.set('mod{}_ch{}_gaussian_phase'.format( + self.mw_vsm_mod_out(), self.mw_vsm_ch_in()), self.mw_vsm_G_phase()) + + self.instr_CC.get_instr().set( + 'vsm_channel_delay{}'.format(self.cfg_qubit_nr()), + self.mw_vsm_delay()) + + def _prep_cw_configure_VSM(self): + # Configure VSM + VSM = self.instr_VSM.get_instr() + for mod in range(1, 9): + VSM.set('mod{}_ch{}_marker_state'.format( + mod, self.mw_vsm_ch_in()), 'off') + VSM.set('mod{}_ch{}_marker_state'.format( + self.mw_vsm_mod_out(), self.spec_vsm_ch_in()), 'on') + VSM.set('mod{}_marker_source'.format( + self.mw_vsm_mod_out()), self.mw_vsm_marker_source()) + + def prepare_for_fluxing(self, reset=True): + pass + + def 
prepare_characterizing(self, exceptions: list = [], verbose=True): + """ + Prepares the qubit for (automatic) characterisation. Will park all + other qubits in the device object to their 'anti-sweetspot' (which is a + sweetspot as well technically speaking). Afterwards, it will move + the qubit to be characterized (self) to its sweetspot. + + Will ignore any qubit whose name (string) is in 'exceptions' + """ + + fluxcurrent = self.instr_FluxCtrl.get_instr() + device = self.instr_device.get_instr() + + exceptions.append('fakequbit') + Qs = device.qubits() + for Q in Qs: + if device.find_instrument(Q).fl_dc_I_per_phi0() == 1: + exceptions.append(Q) + # exceptions.append('D2') + # First park all other qubits to anti sweetspot + print('Moving other qubits away ...') + for qubit_name in device.qubits(): + if (qubit_name not in exceptions) and (qubit_name != self.name): + qubit = device.find_instrument(qubit_name) + channel = qubit.fl_dc_ch() + current = qubit.fl_dc_I0() + qubit.fl_dc_I_per_phi0()/2 + fluxcurrent[channel](current) + if verbose: + print('\t Moving {} to {:.3f} mA' + .format(qubit_name, current/1e-3)) + # Move self to sweetspot: + if verbose: + print('Moving {} to {:.3f} mA'.format( + self.name, self.fl_dc_I0()/1e-3)) + fluxcurrent[self.fl_dc_ch()](self.fl_dc_I0()) + return True + + #################################################### + # CCL_transmon specifc calibrate_ methods below + #################################################### + def find_frequency_adaptive(self, f_start=None, f_span=1e9, f_step=0.5e6, + MC=None, update=True, use_max=False, + spec_mode='pulsed_marked', verbose=True): + """ + 'Adaptive' measurement for finding the qubit frequency. Will look with + a range of the current frequency estimate, and if it does not find a + peak it will move and look f_span Hz above and below the estimate. Will + continue to do such a shift until a peak is found. 
+ """ + if MC is None: + MC = self.instr_MC.get_instr() + + if f_start is None: + f_start = self.freq_qubit() + + # Set high power and averages to be sure we find the peak. + # self.spec_pow(-30) + # self.ro_pulse_amp_CW(0.025) + # old_avg = self.ro_acq_averages() + # self.ro_acq_averages(2**15) + # Repeat measurement while no peak is found: + success = False + f_center = f_start + n = 0 + while not success: + success = None + f_center += f_span*n*(-1)**n + n += 1 + if verbose: + cfreq, cunit = plt_tools.SI_val_to_msg_str( + f_center, 'Hz', float) + sfreq, sunit = plt_tools.SI_val_to_msg_str(f_span, 'Hz', float) + print('Doing adaptive spectroscopy around {:.3f} {} with a ' + 'span of {:.0f} {}.'.format(cfreq, cunit, sfreq, sunit)) + + freqs = np.arange(f_center - f_span/2, f_center + f_span/2, f_step) + + self.measure_spectroscopy(MC=MC, freqs=freqs, mode=spec_mode, + analyze=False) + label = 'spec' + + # Use 'try' because it can give a TypeError when no peak is found + try: + analysis_spec = ma.Qubit_Spectroscopy_Analysis(label=label, + close_fig=True, + qb_name=self.name) + except TypeError: + log.warning('TypeError in Adaptive spectroscopy') + continue + # Check for peak and check its height + freq_peak = analysis_spec.peaks['peak'] + offset = analysis_spec.fit_res.params['offset'].value + peak_height = np.amax(analysis_spec.data_dist) + + # Check if peak is not another qubit, and if it is move that qubit away + for qubit_name in self.instr_device.get_instr().qubits(): + qubit = self.instr_device.get_instr().find_instrument(qubit_name) + if qubit.name != self.name and qubit.freq_qubit() is not None: + + if np.abs(qubit.freq_qubit()-freq_peak) < 5e6: + if verbose: + log.warning('Peak found at frequency of {}. 
' + 'Adjusting currents' + .format(qubit.name)) + fluxcurrent = self.instr_FluxCtrl.get_instr() + old_current = fluxcurrent[qubit.fl_dc_ch()]() + fluxcurrent[qubit.fl_dc_ch()](5e-3) + n -= 1 + success = False + + if success is None: + if freq_peak is None: + success = False + elif peak_height < 4*offset: + success = False + elif peak_height < 3*np.mean(analysis_spec.data_dist): + success = False + else: + success = True + + # self.ro_acq_averages(old_avg) + if update: + if use_max: + self.freq_qubit(analysis_spec.peaks['peak']) + else: + self.freq_qubit(analysis_spec.fitted_freq) + return True + + def calibrate_ro_pulse_amp_CW(self, freqs=None, powers=None, update=True): + """ + Does a resonator power scan and determines at which power the low power + regime is exited. If update=True, will set the readout power to this + power. + """ + + if freqs is None: + freq_center = self.freq_res() + freq_range = 10e6 + freqs = np.arange(freq_center - freq_range/2, + freq_center + freq_range/2, + 0.1e6) + + if powers is None: + powers = np.arange(-40, 0.1, 8) + + self.measure_resonator_power(freqs=freqs, powers=powers, analyze=False) + fit_res = ma.Resonator_Powerscan_Analysis(label='Resonator_power_scan', + close_fig=True) + if update: + ro_pow = 10**(fit_res.power/20) + self.ro_pulse_amp_CW(ro_pow) + self.ro_pulse_amp(ro_pow) + self.freq_res(fit_res.f_low) + if self.freq_qubit() is None: + f_qubit_estimate = self.freq_res() + (65e6)**2/(fit_res.shift) + log.info('No qubit frquency found. 
Updating with RWA to {}' + .format(f_qubit_estimate)) + self.freq_qubit(f_qubit_estimate) + + return True + + def find_qubit_sweetspot(self, freqs=None, dac_values=None, update=True, + set_to_sweetspot=True, method='DAC', fluxChan=None, + spec_mode='pulsed_marked'): + """ + Should be edited such that it contains reference to different measurement + methods (tracking / 2D scan / broad spectroscopy) + + method = 'DAC' - uses ordinary 2D DAC scan + 'tracked - uses tracked spectroscopy (not really implemented)' + TODO: If spectroscopy does not yield a peak, it should discard it + """ + + if freqs is None: + freq_center = self.freq_qubit() + freq_range = 50e6 + freqs = np.arange(freq_center - freq_range, freq_center + freq_range, + 1e6) + if dac_values is None: + if self.fl_dc_I0() is not None: + dac_values = np.linspace(self.fl_dc_I0() - 1e-3, + self.fl_dc_I0() + 1e-3, 8) + else: + dac_values = np.linspace(-0.5e3, 0.5e-3, 10) + + if fluxChan is None: + if self.fl_dc_ch() is not None: + fluxChan = self.fl_dc_ch() + else: + log.error('No fluxchannel found or specified. 
Please ' + 'specify fluxChan') + + if method == 'DAC': + t_start = time.strftime('%Y%m%d_%H%M%S') + self.measure_qubit_frequency_dac_scan(freqs=freqs, + dac_values=dac_values, + fluxChan=fluxChan, + analyze=False, + mode=spec_mode, + nested_resonator_calibration=False, + # nested_resonator_calibration_use_min=False, + resonator_freqs=np.arange(-5e6, 5e6, 0.2e6)+self.freq_res()) + + timestamp = a_tools.get_timestamps_in_range(t_start, + label='Qubit_dac_scan' + + self.msmt_suffix) + timestamp = timestamp[0] + a = ma2.da.DAC_analysis(timestamp=timestamp) + self.fl_dc_polycoeff(a.dac_fit_res['fit_polycoeffs']) + sweetspot_current = a.dac_fit_res['sweetspot_dac'] + + elif method == 'tracked': + t_start = time.strftime('%Y%m%d_%H%M%S') + + for i, dac_value in enumerate(dac_values): + self.instr_FluxCtrl.get_instr()[self.fl_dc_ch()](dac_value) + if i == 0: + self.find_frequency(freqs=freqs, update=True) + else: + self.find_frequency(update=True) + + t_end = time.strftime('%Y%m%d_%H%M%S') + + a = ma2.DACarcPolyFit(t_start=t_start, t_stop=t_end, + label='spectroscopy__' + self.name, + dac_key='Instrument settings.fluxcurrent.'+self.fl_dc_ch(), + degree=2) + + pc = a.fit_res['fit_polycoeffs'] + + self.fl_dc_polycoeff(pc) + sweetspot_current = -pc[1]/(2*pc[0]) + + else: + log.error('Sweetspot method {} unknown. ' + 'Use "DAC" or "tracked".'.format(method)) + + if update: + self.fl_dc_I0(sweetspot_current) + self.freq_max(self.calc_current_to_freq(sweetspot_current)) + if set_to_sweetspot: + self.instr_FluxCtrl.get_instr()[self.fl_dc_ch()](sweetspot_current) + + # Sanity check: does this peak move with flux? 
+ check_vals = [self.calc_current_to_freq(np.min(dac_values)), + self.calc_current_to_freq(self.fl_dc_I0()), + self.calc_current_to_freq(np.max(dac_values))] + + if check_vals[0] == pytest.approx(check_vals[1], abs=0.5e6): + if check_vals[0] == pytest.approx(check_vals[2], abs=0.5e6): + if check_vals[1] == pytest.approx(check_vals[2], abs=0.5e6): + log.warning('No qubit shift found with varying flux. ' + 'Peak is not a qubit') + return False + + if self.fl_dc_polycoeff()[1] < 1e6 and self.fl_dc_polycoeff()[2] < 1e6: + log.warning('No qubit shift found with varying flux. Peak is ' + 'not a qubit') + return False + + return True + + def find_qubit_sweetspot_1D(self, freqs=None, dac_values=None): + + # self.spec_pow(-30) + self.ro_acq_averages(2**14) + + if dac_values is None: + if self.fl_dc_I0() is not None: + dac_values = np.linspace(self.fl_dc_I0() - 1e-3, + self.fl_dc_I0() + 1e-3, 8) + else: + dac_values = np.linspace(-1e3, 1e-3, 8) + + if freqs is None: + freq_center = self.freq_qubit() + freq_range = 50e6 + freqs = np.arange(freq_center - freq_range, freq_center + freq_range, + 0.5e6) + Qubit_frequency = [] + Reson_frequency = [] + flux_channel = self.fl_dc_ch() + + for dac_value in dac_values: + # Set Flux Current + self.instr_FluxCtrl.get_instr()[flux_channel](dac_value) + + # Find Resonator + self.find_resonator_frequency(freqs=np.arange(-5e6, 5.1e6, .1e6)+self.freq_res(), + use_min=True) + # Find Qubit frequency + self.find_frequency(freqs=freqs) + + Qubit_frequency.append(self.freq_qubit()) + Reson_frequency.append(self.freq_res()) + + # Fit sweetspot with second degree polyfit + fit_coefs = np.polyfit(dac_values, Qubit_frequency, deg=2) + sweetspot_current = fit_coefs[1]/(2*fit_coefs[0]) + + # Set Flux Current to sweetspot + self.instr_FluxCtrl.get_instr()[flux_channel](sweetspot_current) + self.find_resonator_frequency(freqs=np.arange(-5e6, 5.1e6, .1e6)+self.freq_res(), + use_min=True) + frequency_sweet_spot = self.find_frequency( + 
freqs=np.arange(-50e6, 50e6, .5e6)+self.freq_qubit()) + + return frequency_sweet_spot + + def find_anharmonicity_estimate(self, freqs=None, anharmonicity=None, + mode='pulsed_marked', update=True, power_12=10): + """ + Finds an estimate of the anharmonicity by doing a spectroscopy around + 150 MHz below the qubit frequency. + + TODO: if spec_pow is too low/high, it should adjust it to approx the + ideal spec_pow + 25 dBm + """ + + if anharmonicity is None: + # Standard estimate, negative by convention + anharmonicity = self.anharmonicity() + + f02_estimate = self.freq_qubit()*2 + anharmonicity + + if freqs is None: + freq_center = f02_estimate/2 + freq_range = 175e6 + freqs = np.arange(freq_center-1/2*freq_range, self.freq_qubit()+1/2*freq_range, + 0.5e6) + old_spec_pow = self.spec_pow() + self.spec_pow(self.spec_pow()+power_12) + + self.measure_spectroscopy(freqs=freqs, mode=mode, analyze=False) + + a = ma.Qubit_Spectroscopy_Analysis(label=self.msmt_suffix, + analyze_ef=True) + self.spec_pow(old_spec_pow) + f02 = 2*a.params['f0_gf_over_2'].value + if update: + self.anharmonicity(f02-2*self.freq_qubit()) + return True + + def calibrate_mw_pulse_amplitude_coarse(self, + amps=None, + close_fig=True, verbose=False, + MC=None, update=True, + all_modules=False): + """ + Calibrates the pulse amplitude using a single rabi oscillation. 
+ Depending on self.cfg_with_vsm uses VSM or AWG channel amplitude + to sweep the amplitude of the pi pulse + + For details see self.measure_rabi + """ + if amps is None: + if self.cfg_with_vsm(): + amps = np.linspace(0.1, 1, 31) + else: + amps = np.linspace(0, 1, 31) + + self.measure_rabi(amps=amps, MC=MC, analyze=False, + all_modules=all_modules) + a = ma.Rabi_Analysis(close_fig=close_fig, label='rabi') + try: + if update: + if self.cfg_with_vsm(): + self.mw_vsm_G_amp(a.rabi_amplitudes['piPulse']) + else: + self.mw_channel_amp(a.rabi_amplitudes['piPulse']) + except(ValueError): + log.warning("Extracted piPulse amplitude out of parameter range. " + "Keeping previous value.") + return True + + def calibrate_mw_pulse_amplitude_coarse_test(self, + amps=None, + close_fig=True, verbose=False, + MC=None, update=True, + all_modules=False): + """ + Calibrates the pulse amplitude using a single rabi oscillation. + Depending on self.cfg_with_vsm uses VSM or AWG channel amplitude + to sweep the amplitude of the pi pulse + + For details see self.measure_rabi + """ + self.ro_acq_averages(2**10) + self.ro_soft_avg(3) + # self.mw_gauss_width(10e-9) + # self.mw_pulse_duration()=4*self.mw_gauss_width() + if amps is None: + if self.cfg_with_vsm(): + amps = np.linspace(0.1, 1, 31) + else: + amps = np.linspace(0, 1, 31) + + self.measure_rabi(amps=amps, MC=MC, analyze=False, + all_modules=all_modules) + a = ma.Rabi_Analysis(close_fig=close_fig, label='rabi') + old_gw = self.mw_gauss_width() + if a.rabi_amplitudes['piPulse'] > 1 or a.rabi_amplitudes['piHalfPulse'] > a.rabi_amplitudes['piPulse']: + self.mw_gauss_width(2*old_gw) + self.prepare_for_timedomain() + mw_lutman.load_waveforms_onto_AWG_lookuptable( + force_load_sequencer_program=False) + + try: + if self.cfg_with_vsm(): + self.mw_vsm_G_amp(a.rabi_amplitudes['piPulse']) + else: + self.mw_channel_amp(a.rabi_amplitudes['piPulse']) + except(ValueError): + log.warning("Extracted piPulse amplitude out of parameter range. 
" + "Keeping previous value.") + return True + + def calibrate_mw_vsm_delay(self): + """ + Uploads a sequence for calibrating the vsm delay. + The experiment consists of a single square pulse of 20 ns that + triggers both the VSM channel specified and the AWG8. + + Note: there are two VSM markers, align with the first of two. + + By changing the "mw_vsm_delay" parameter the delay can be calibrated. + N.B. Ensure that the signal is visible on a scope or in the UFHQC + readout first! + """ + self.prepare_for_timedomain() + CCL = self.instr_CC.get_instr() + CCL.stop() + p = sqo.vsm_timing_cal_sequence( + qubit_idx=self.cfg_qubit_nr(), + platf_cfg=self.cfg_openql_platform_fn()) + CCL.eqasm_program(p.filename) + CCL.start() + print('CCL program is running. Parameter "mw_vsm_delay" can now be ' + 'calibrated by hand.') + + def calibrate_motzoi(self, MC=None, verbose=True, update=True, motzois=None, + disable_metadata=False, prepare_for_timedomain=True): + """ + Calibrates the DRAG coeffcieint value, named motzoi (after Felix Motzoi) + for legacy reasons. + + For details see docstring of measure_motzoi method. 
+ """ + using_VSM = self.cfg_with_vsm() + if using_VSM and motzois is None: + motzois = gen_sweep_pts(start=0.1, stop=1.0, num=31) + elif motzois is None: + motzois = gen_sweep_pts(center=0, span=.3, num=31) + + # large range + a = self.measure_motzoi(MC=MC, motzoi_amps=motzois, analyze=True, + prepare_for_timedomain=prepare_for_timedomain, + disable_metadata=disable_metadata) + opt_motzoi = a.get_intersect()[0] + if opt_motzoi > max(motzois) or opt_motzoi < min(motzois): + if verbose: + print('optimal motzoi {:.3f} '.format(opt_motzoi) + + 'outside of measured span, aborting') + return False + if update: + if using_VSM: + if verbose: + print('Setting motzoi to {:.3f}'.format(opt_motzoi)) + self.mw_vsm_D_amp(opt_motzoi) + else: + self.mw_motzoi(opt_motzoi) + return opt_motzoi + + def calibrate_mixer_offsets_drive(self, mixer_channels=['G', 'D'], + update: bool = True, ftarget=-110, + maxiter=300)-> bool: + """ + Calibrates the mixer offset and updates the I and Q offsets in + the qubit object. 
+ + Args: + mixer_channels (list): + No use in no-VSM case + With VSM specifies whether to calibrate offsets for both + gaussuan 'G' and derivarive 'D' channel + + update (bool): + should optimal values be set in the qubit object + + ftarget (float): power of the signal at the LO frequency + for which the optimization is terminated + """ + + # turn relevant channels on + + using_VSM = self.cfg_with_vsm() + + MW_LutMan = self.instr_LutMan_MW.get_instr() + + AWG = MW_LutMan.AWG.get_instr() + if using_VSM: + if AWG.__class__.__name__ == 'QuTech_AWG_Module': + chGI_par = AWG.parameters['ch1_offset'] + chGQ_par = AWG.parameters['ch2_offset'] + chDI_par = AWG.parameters['ch3_offset'] + chDQ_par = AWG.parameters['ch4_offset'] + + else: + # This part is AWG8 specific and wont work with a QWG + awg_ch = self.mw_awg_ch() + AWG.stop() + AWG.set('sigouts_{}_on'.format(awg_ch-1), 1) + AWG.set('sigouts_{}_on'.format(awg_ch+0), 1) + AWG.set('sigouts_{}_on'.format(awg_ch+1), 1) + AWG.set('sigouts_{}_on'.format(awg_ch+2), 1) + + chGI_par = AWG.parameters['sigouts_{}_offset'.format(awg_ch-1)] + chGQ_par = AWG.parameters['sigouts_{}_offset'.format(awg_ch+0)] + chDI_par = AWG.parameters['sigouts_{}_offset'.format(awg_ch+1)] + chDQ_par = AWG.parameters['sigouts_{}_offset'.format(awg_ch+2)] + # End of AWG8 specific part + + VSM = self.instr_VSM.get_instr() + + ch_in = self.mw_vsm_ch_in() + # module 8 is hardcoded for mixer calibartions (signal hound) + VSM.set('mod8_marker_source'.format(ch_in), 'int') + VSM.set('mod8_ch{}_marker_state'.format(ch_in), 'on') + + # Calibrate Gaussian component mixer + if 'G' in mixer_channels: + VSM.set('mod8_ch{}_gaussian_amp'.format(ch_in), 1.0) + VSM.set('mod8_ch{}_derivative_amp'.format(ch_in), 0.1) + offset_I, offset_Q = cal_toolbox.mixer_carrier_cancellation( + SH=self.instr_SH.get_instr(), + source=self.instr_LO_mw.get_instr(), + MC=self.instr_MC.get_instr(), + chI_par=chGI_par, chQ_par=chGQ_par, + label='Mixer_offsets_drive_G'+self.msmt_suffix, + 
ftarget=ftarget, maxiter=maxiter) + if update: + self.mw_mixer_offs_GI(offset_I) + self.mw_mixer_offs_GQ(offset_Q) + if 'D' in mixer_channels: + # Calibrate Derivative component mixer + VSM.set('mod8_ch{}_gaussian_amp'.format(ch_in), 0.1) + VSM.set('mod8_ch{}_derivative_amp'.format(ch_in), 1.0) + + offset_I, offset_Q = cal_toolbox.mixer_carrier_cancellation( + SH=self.instr_SH.get_instr(), + source=self.instr_LO_mw.get_instr(), + MC=self.instr_MC.get_instr(), + chI_par=chDI_par, + chQ_par=chDQ_par, + label='Mixer_offsets_drive_D'+self.msmt_suffix, + ftarget=ftarget, maxiter=maxiter) + if update: + self.mw_mixer_offs_DI(offset_I) + self.mw_mixer_offs_DQ(offset_Q) + + else: + if self._using_QWG(): + QWG_MW = self.instr_LutMan_MW.get_instr().AWG.get_instr() + chI = self.instr_LutMan_MW.get_instr().channel_I() + chQ = self.instr_LutMan_MW.get_instr().channel_Q() + chI_par = QWG_MW.parameters['ch%s_offset' % chI] + chQ_par = QWG_MW.parameters['ch%s_offset' % chQ] + + offset_I, offset_Q = cal_toolbox.mixer_carrier_cancellation( + SH=self.instr_SH.get_instr(), + source=self.instr_LO_mw.get_instr(), + MC=self.instr_MC.get_instr(), + chI_par=chI_par, + chQ_par=chQ_par, + ftarget=ftarget, maxiter=maxiter) + if update: + self.mw_mixer_offs_GI(offset_I) + self.mw_mixer_offs_GQ(offset_Q) + + else: + awg_ch = self.mw_awg_ch() + AWG.stop() + AWG.set('sigouts_{}_on'.format(awg_ch-1), 1) + AWG.set('sigouts_{}_on'.format(awg_ch+0), 1) + chGI_par = AWG.parameters['sigouts_{}_offset'.format(awg_ch-1)] + chGQ_par = AWG.parameters['sigouts_{}_offset'.format(awg_ch+0)] + + offset_I, offset_Q = cal_toolbox.mixer_carrier_cancellation( + SH=self.instr_SH.get_instr(), + source=self.instr_LO_mw.get_instr(), + MC=self.instr_MC.get_instr(), + chI_par=chGI_par, chQ_par=chGQ_par, + label='Mixer_offsets_drive'+self.msmt_suffix, + ftarget=ftarget, maxiter=maxiter) + + if update: + self.mw_mixer_offs_GI(offset_I) + self.mw_mixer_offs_GQ(offset_Q) + + return True + + def 
calibrate_mixer_offsets_RO(self, update: bool = True, + ftarget=-110) -> bool: + """ + Calibrates the mixer offset and updates the I and Q offsets in + the qubit object. + + Args: + update (bool): + if True updates values in the qubit object. + + ftarget (float): power of the signal at the LO frequency + for which the optimization is terminated + + Return: + success (bool): + returns True if succesful. Currently always + returns True (i.e., no sanity check implemented) + """ + + chI_par = self.instr_acquisition.get_instr().sigouts_0_offset + chQ_par = self.instr_acquisition.get_instr().sigouts_1_offset + + offset_I, offset_Q = cal_toolbox.mixer_carrier_cancellation( + SH=self.instr_SH.get_instr(), + source=self.instr_LO_ro.get_instr(), + MC=self.instr_MC.get_instr(), + chI_par=chI_par, + chQ_par=chQ_par, + x0=(0.05, 0.05), + ftarget=ftarget) + + if update: + self.ro_pulse_mixer_offs_I(offset_I) + self.ro_pulse_mixer_offs_Q(offset_Q) + return True + + def calibrate_mixer_offsets_LRU(self, + mixer_channels: list =['G', 'D'], + update: bool = True, + ftarget: int = -110, + maxiter: int = 300, + disable_metadata: bool = False)-> bool: + """ + Calibrates the mixer offset and updates the I and Q offsets in + the qubit object. 
+ + Args: + mixer_channels (list): + No use in no-VSM case + With VSM specifies whether to calibrate offsets for both + gaussuan 'G' and derivarive 'D' channel + + update (bool): + should optimal values be set in the qubit object + + ftarget (float): power of the signal at the LO frequency + for which the optimization is terminated + """ + + # turn relevant channels on + + MW_LutMan = self.instr_LutMan_LRU.get_instr() + AWG = MW_LutMan.AWG.get_instr() + awg_ch = MW_LutMan.channel_I() + AWG.stop() + AWG.set('sigouts_{}_on'.format(awg_ch-1), 1) + AWG.set('sigouts_{}_on'.format(awg_ch+0), 1) + chGI_par = AWG.parameters['sigouts_{}_offset'.format(awg_ch-1)] + chGQ_par = AWG.parameters['sigouts_{}_offset'.format(awg_ch+0)] + + offset_I, offset_Q = cal_toolbox.mixer_carrier_cancellation( + SH=self.instr_SH.get_instr(), + source=self.instr_LO_LRU.get_instr(), + MC=self.instr_MC.get_instr(), + chI_par=chGI_par, chQ_par=chGQ_par, + label='Mixer_offsets_drive'+self.msmt_suffix, + ftarget=ftarget, maxiter=maxiter, + disable_metadata=disable_metadata) + + if update: + self.lru_mixer_offs_GI(offset_I) + self.lru_mixer_offs_GQ(offset_Q) + + return True + + def calibrate_mixer_offset_LRU_single_channel( + self, + ch_par, + currents: list = np.arange(-10e-3, 10e-3, .5e-3), + update: bool = True, + adaptive_sampling: bool = False, + ftarget: int =-100, + prepare: bool = True, + disable_metadata=False): + ''' + Calibrate mixer offset for single AWG with a + current source. This was developed for the + LRU signals in the Pagani setup. + "ch_par" should the current source parameter + associated to the qubit. 
+ ''' + if prepare: + self.prepare_for_timedomain() + #turn the relevant soruce on + source = self.instr_LO_LRU.get_instr() + source.on() + frequency = source.frequency() + SH = self.instr_SH.get_instr() + MC = self.instr_MC.get_instr() + detector = det.Signal_Hound_fixed_frequency( + SH, frequency=(source.frequency()), + Navg=5, delay=0.0, prepare_for_each_point=False) + MC.set_sweep_function(ch_par) + MC.set_detector_function(detector) + if adaptive_sampling: + goal = l1dm.mk_min_threshold_goal_func( + max_pnts_beyond_threshold=10) + minimize = True + loss = l1dm.mk_minimization_loss_func( + # Just in case it is ever changed to maximize + threshold=(-1) ** (minimize + 1) * ftarget, + interval_weight=50.0, + ) + adaptive_num_pnts_uniform=0 + adaptive_num_pts_max = 50 + bounds = (np.min(currents), np.max(currents)) + # adaptive sampler + # par_idx = 1 # Moved to method's arguments + adaptive_pars = { + "adaptive_function": l1dm.Learner1D_Minimizer, + "goal": lambda l: goal(l) or l.npoints > adaptive_num_pts_max, + "bounds": bounds, + "loss_per_interval": loss, + "minimize": minimize, + # A few uniform points to make more likely to find the peak + "X0": np.linspace( + np.min(bounds), + np.max(bounds), + adaptive_num_pnts_uniform + 2)[1:-1] + } + MC.set_sweep_function(ch_par) + MC.set_adaptive_function_parameters(adaptive_pars) + MC.set_sweep_points(currents) + label = f'Offset_calibration_LRU_single_channel_{self.name}' + MC.run(label, mode="adaptive", + disable_snapshot_metadata = disable_metadata) + else: + # Just single 1D sweep + MC.set_sweep_points(currents) + label = f'Offset_calibration_LRU_single_channel_{self.name}' + MC.run(label, mode="1D", + disable_snapshot_metadata = disable_metadata) + # Run analysis + a = ma2.Basic1DAnalysis() + powers = a.raw_data_dict['measured_values'][0] + currents = a.raw_data_dict['xvals'][0] + opt_curr = currents[np.argmin(powers)] + if update: + self.lru_mixer_offs_GI(opt_curr) + ch_par(opt_curr) + return True + + def 
calibrate_mixer_skewness_drive(self, MC=None, + mixer_channels: list = ['G', 'D'], + x0: list = [1.0, 0.0], + cma_stds = None, + maxfevals: int = 250, + update: bool = True, + prepare=True)-> bool: + """ + Calibrates the mixer skewness and updates values in the qubit object. + + Args: + MC (MeasurementControl): + instance of Measurement Control + + mixer_channels (list): + list of strings indicating what channels to + calibrate. In VSM case 'G' and/or 'D' can be specified. + In no-VSM case mixer_channels is alway set to ['G']. + + update (bool): + if True updates values in the qubit object. + + Return: + success (bool): + returns True if succesful. Currently always + returns True (i.e., no sanity check implemented) + """ + + # turn relevant channels on + if MC == None: + MC = self.instr_MC.get_instr() + + if prepare: + self.prepare_for_timedomain() + # Load the sequence + CCL = self.instr_CC.get_instr() + p = sqo.CW_tone( + qubit_idx=self.cfg_qubit_nr(), + platf_cfg=self.cfg_openql_platform_fn()) + CCL.eqasm_program(p.filename) + CCL.start() + + if cma_stds is None: + cma_stds = [1/self.mw_channel_amp(), 10] + if self.cfg_with_vsm(): + # Open the VSM channel + VSM = self.instr_VSM.get_instr() + ch_in = self.mw_vsm_ch_in() + # module 8 is hardcoded for use mixer calls (signal hound) + VSM.set('mod8_marker_source'.format(ch_in), 'int') + VSM.set('mod8_ch{}_marker_state'.format(ch_in), 'on') + VSM.set('mod8_ch{}_gaussian_amp'.format(ch_in), 1.0) + VSM.set('mod8_ch{}_derivative_amp'.format(ch_in), 1.0) + else: + mixer_channels = ['G'] + + mw_lutman = self.instr_LutMan_MW.get_instr() + mw_lutman.mixer_apply_predistortion_matrix(True) + # # Define the parameters that will be varied + for mixer_ch in mixer_channels: + if self.cfg_with_vsm(): + alpha = mw_lutman.parameters['{}_mixer_alpha'.format(mixer_ch)] + phi = mw_lutman.parameters['{}_mixer_phi'.format(mixer_ch)] + if mixer_ch == 'G': + mw_lutman.sq_G_amp(.5) + mw_lutman.sq_D_amp(0) + elif mixer_ch == 'D': + 
mw_lutman.sq_G_amp(0) + mw_lutman.sq_D_amp(.5) + else: + alpha = mw_lutman.parameters['mixer_alpha'] + phi = mw_lutman.parameters['mixer_phi'] + mw_lutman.sq_amp(.5) + + spurious_sideband_freq = self.freq_qubit() - 2*self.mw_freq_mod() + + # This is to ensure the square waveform is pulse 10! + mw_lutman.set_default_lutmap() + + if self._using_QWG(): + prepare_function = mw_lutman.apply_mixer_predistortion_corrections + prepare_function_kwargs = {'wave_dict': {}} + else: + def load_square(): + AWG = mw_lutman.AWG.get_instr() + AWG.stop() + # When using real-time modulation, mixer_alpha is encoded in channel amplitudes. + # Loading amplitude ensures new amplitude will be calculated with mixer_alpha. + if mw_lutman.cfg_sideband_mode() == 'real-time': + mw_lutman._set_channel_amp(mw_lutman._get_channel_amp()) + + # Codeword 10 is hardcoded in the generate CCL config + # mw_lutman.load_waveform_realtime(wave_id='square') + # mw_lutman.load_waveforms_onto_AWG_lookuptable( + # force_load_sequencer_program=False) + mw_lutman.load_waveform_onto_AWG_lookuptable( + wave_id='square', regenerate_waveforms=True) + AWG.start() + prepare_function = load_square + prepare_function_kwargs = {} + + detector = det.Signal_Hound_fixed_frequency( + self.instr_SH.get_instr(), spurious_sideband_freq, + prepare_for_each_point=True, + Navg=5, + prepare_function=prepare_function, + prepare_function_kwargs=prepare_function_kwargs) + # mw_lutman.load_waveform_realtime, + # prepare_function_kwargs={'waveform_key': 'square', 'wf_nr': 10}) + ad_func_pars = {'adaptive_function': cma.fmin, + 'x0': x0, + 'sigma0': 1, + 'minimize': True, + 'noise_handler': cma.NoiseHandler(N=2), + 'options': {'cma_stds': cma_stds, + 'maxfevals': maxfevals}} # Should be enough for mixer skew + + MC.set_sweep_functions([alpha, phi]) + #MC.set_sweep_function(alpha) + MC.set_detector_function(detector) # sets test_detector + MC.set_adaptive_function_parameters(ad_func_pars) + MC.set_sweep_points(np.linspace(0,2,300)) + 
MC.run( + name='Spurious_sideband_{}{}'.format( + mixer_ch, self.msmt_suffix), + mode='adaptive') + # For the figure + ma.OptimizationAnalysis_v2() + a = ma.OptimizationAnalysis(auto=True, label='Spurious_sideband') + alpha = a.optimization_result[0][0] + phi = a.optimization_result[0][1] + if update: + self.set('mw_{}_mixer_alpha'.format(mixer_ch), alpha) + self.set('mw_{}_mixer_phi'.format(mixer_ch), phi) + + return True + + def calibrate_mixer_skewness_RO(self, update=True, prepare=True, + maxfevals = 150): + """ + Calibrates the mixer skewness using mixer_skewness_cal_UHFQC_adaptive + see calibration toolbox for details + + Args: + update (bool): + if True updates values in the qubit object. + + Return: + success (bool): + returns True if succesful. Currently always + returns True (i.e., no sanity check implemented) + """ + CCL = self.instr_CC.get_instr() + if prepare: + self.prepare_for_timedomain() + p = sqo.CW_RO_sequence( + qubit_idx=self.cfg_qubit_nr(), + platf_cfg=self.cfg_openql_platform_fn()) + CCL.eqasm_program(p.filename) + CCL.start() + + # using the restless tuning sequence + # self.prepare_for_timedomain() + # p = sqo.randomized_benchmarking( + # self.cfg_qubit_nr(), self.cfg_openql_platform_fn(), + # nr_cliffords=[1], + # net_clifford=1, nr_seeds=1, restless=True, cal_points=False) + # self.instr_CC.get_instr().eqasm_program(p.filename) + # self.instr_CC.get_instr().start() + + LutMan = self.instr_LutMan_RO.get_instr() + LutMan.mixer_apply_predistortion_matrix(True) + MC = self.instr_MC.get_instr() + # S1 = swf.lutman_par_UHFQC_dig_trig( + # LutMan, LutMan.mixer_alpha, single=False, run=True) + # S2 = swf.lutman_par_UHFQC_dig_trig( + # LutMan, LutMan.mixer_phi, single=False, run=True) + S1 = swf.lutman_par_UHFQC_dig_trig( + LutMan=LutMan, + LutMan_parameter=LutMan.parameters['mixer_alpha'], + single=False, run=True) + S2 = swf.lutman_par_UHFQC_dig_trig( + LutMan=LutMan, + LutMan_parameter=LutMan.parameters['mixer_phi'], + single=False, run=True) + 
+ detector = det.Signal_Hound_fixed_frequency( + self.instr_SH.get_instr(), + frequency=self.ro_freq() - 2*self.ro_freq_mod(), + Navg=5, delay=0, + prepare_for_each_point=True) + + ad_func_pars = {'adaptive_function': nelder_mead, + 'x0': [1.0, 0.0], + 'initial_step': [.15, 10], + 'no_improve_break': 15, + 'minimize': True, + 'maxiter': maxfevals} + MC.set_sweep_functions([S1, S2]) + MC.set_detector_function(detector) # sets test_detector + MC.set_adaptive_function_parameters(ad_func_pars) + MC.run(name='Spurious_sideband', mode='adaptive') + a = ma.OptimizationAnalysis(auto=True, label='Spurious_sideband') + alpha = a.optimization_result[0][0] + phi = a.optimization_result[0][1] + + if update: + self.ro_pulse_mixer_phi.set(phi) + self.ro_pulse_mixer_alpha.set(alpha) + LutMan.mixer_alpha(alpha) + LutMan.mixer_phi(phi) + + def calibrate_mw_pulses_basic(self, + cal_steps=['offsets', 'amp_coarse', 'freq', + 'drag', 'amp_fine', 'amp_fine', + 'amp_fine'], + kw_freqs={'steps': [1, 3, 10, 30, 100, + 300, 1000]}, + kw_amp_coarse={'amps': np.linspace(0, 1, 31)}, + kw_amp_fine={'update': True}, + soft_avg_allxy=3, + kw_offsets={'ftarget': -120}, + kw_skewness={}, + kw_motzoi={'update': True}, + f_target_skewness=-120): + + """ + Performs a standard calibration of microwave pulses consisting of + + - mixer offsets + - mixer skewness + - pulse ampl coarse (rabi) + - frequency (ramsey) + - motzoi + - ampl fine (flipping) + + - AllXY (to verify) + + Note that this is a basic calibration and does not involve fine tuning + to ~99.9% and only works if the qubit is well behaved. 
+ """ + for this_step in cal_steps: + if this_step == 'offsets': + self.calibrate_mixer_offsets_drive(**kw_offsets) + elif this_step == 'skewness': + self.calibrate_mixer_skewness_drive(**kw_skewness) + elif this_step == 'amp_coarse': + self.calibrate_mw_pulse_amplitude_coarse(**kw_amp_coarse) + elif this_step == 'freq': + self.find_frequency('ramsey', **kw_freqs) + elif this_step == 'drag': + self.calibrate_motzoi(**kw_motzoi) + elif this_step == 'amp_fine': + self.measure_flipping(**kw_amp_fine) + old_soft_avg = self.ro_soft_avg() + self.ro_soft_avg(soft_avg_allxy) + self.measure_allxy() + self.ro_soft_avg(old_soft_avg) + return True + + def calibrate_ssro_coarse(self, MC=None, + nested_MC=None, + freqs=None, + amps=None, + analyze: bool = True, + update: bool = True): + ''' + Performs a 2D sweep of .ro_freq and .ro_pulse_amp and + measures SSRO parameters (SNR, F_a, F_d). + After the sweep is done, it sets the parameters for which the assignment + fidelity was maximum. + + Args: + freq (array): + Range of frequencies of sweep. + + amps (array): + Range of amplitudes of sweep. + ''' + + if MC is None: + MC = self.instr_MC.get_instr() + + if nested_MC is None: + nested_MC = self.instr_nested_MC.get_instr() + + if freqs is None: + if self.dispersive_shift() is not None: + freqs = np.arange(-2*abs(self.dispersive_shift()), + abs(self.dispersive_shift()), .5e6) + self.freq_res() + else: + raise ValueError('self.dispersive_shift is None. 
Please specify\ + range of sweep frequencies.') + + if amps is None: + amps = np.linspace(.001, .5, 31) + + ro_lm = self.find_instrument(self.instr_LutMan_RO()) + q_idx = self.cfg_qubit_nr() + swf1 = swf.RO_freq_sweep(name='RO frequency', + qubit=self, + ro_lutman=ro_lm, + idx=q_idx, + parameter=self.ro_freq) + + nested_MC.set_sweep_function(swf1) + nested_MC.set_sweep_points(freqs) + nested_MC.set_sweep_function_2D(self.ro_pulse_amp) + nested_MC.set_sweep_points_2D(amps) + + d = det.Function_Detector(self.measure_ssro, + result_keys=['SNR', 'F_a', 'F_d'], + value_names=['SNR', 'F_a', 'F_d'], + value_units=['a.u.', 'a.u.', 'a.u.'], + msmt_kw={'prepare': False} + ) + nested_MC.set_detector_function(d) + nested_MC.run(name='RO_coarse_tuneup', mode='2D') + + if analyze is True: + # Analysis + a = ma.TwoD_Analysis(label='RO_coarse_tuneup', auto=False) + # Get best parameters + a.get_naming_and_values_2D() + arg = np.argmax(a.measured_values[1]) + index = np.unravel_index(arg, (len(a.sweep_points), + len(a.sweep_points_2D))) + best_freq = a.sweep_points[index[0]] + best_amp = a.sweep_points_2D[index[1]] + a.run_default_analysis() + print('Frequency: {}, Amplitude: {}'.format(best_freq, best_amp)) + + if update is True: + self.ro_freq(best_freq) + self.ro_pulse_amp(best_amp) + + return True + + def calibrate_ssro_pulse_duration(self, MC=None, + nested_MC=None, + amps=None, + amp_lim=None, + times= None, + use_adaptive: bool = True, + n_points: int = 80, + analyze: bool = True, + update: bool = True): + ''' + Calibrates the RO pulse duration by measuring the assignment fidelity of + SSRO experiments as a function of the RO pulse duration and amplitude. + For each set of parameters, the routine calibrates optimal weights and + then extracts readout fidelity. + This measurement can be performed using an adaptive sampler + (use_adaptive=True) or a regular 2D parameter sweep (use_adaptive=False). + Designed to be used in the GBT node 'SSRO Pulse Duration'. 
+ + Args: + amps (array): + If using 2D sweep: + Set of RO amplitudes sampled in the 2D sweep. + If using adaptive sampling: + Minimum and maximum (respectively) of the RO amplitude range + used in the adaptive sampler. + + times (array): + If using 2D sweep: + Set of RO pulse durations sampled in the 2D sweep. + If using adaptive sampling: + Minimum and maximum (respectively) of the RO pulse duration + range used in the adaptive sampler. + + use_adaptive (bool): + Boolean that sets the sampling mode. Set to "False" for a + regular 2D sweep or set to "True" for adaptive sampling. + + n_points: + Only relevant in the adaptive sampling mode. Sets the maximum + number of points sampled. + ''' + + if MC is None: + MC = self.instr_MC.get_instr() + + if nested_MC is None: + nested_MC = self.instr_nested_MC.get_instr() + + if times is None: + times = np.arange(50e-9, 801e-9, 10e-9) + + if amps is None: + amps = np.linspace(.01,.25,11) + if amp_lim is None: + amp_lim = (0.01, 0.25) + ###################### + # Experiment + ###################### + nested_MC.set_sweep_functions([self.ro_pulse_length, + self.ro_pulse_amp]) + d = det.Function_Detector(self.calibrate_optimal_weights, + result_keys=['F_a','F_d','SNR'], + value_names=['F_a','F_d','SNR'], + value_units=['a.u.','a.u.','a.u.']) + nested_MC.set_detector_function(d) + # Use adaptive sampling + if use_adaptive is True: + # Adaptive sampler cost function + loss_per_simplex = mk_minimization_loss_func() + goal = mk_minimization_goal_func() + + nested_MC.set_adaptive_function_parameters( + {'adaptive_function': LearnerND_Minimizer, + 'goal': lambda l: goal(l) or l.npoints > n_points, + 'loss_per_simplex': loss_per_simplex, + 'bounds': [(50e-9, 801e-9), amp_lim], + 'minimize': False + }) + nested_MC.run(name='RO_duration_tuneup_{}'.format(self.name), + mode='adaptive') + # Use standard 2D sweep + else: + nested_MC.set_sweep_points(times) + nested_MC.set_sweep_points_2D(amps) + 
nested_MC.run(name='RO_duration_tuneup_{}'.format(self.name), + mode='2D') + ##################### + # Analysis + ##################### + if analyze is True: + if use_adaptive is True: + A = ma2.Readout_landspace_Analysis(label='RO_duration_tuneup') + optimal_pulse_duration = A.proc_data_dict['quantities_of_interest']['Optimal_parameter_X'] + optimal_pulse_amplitude = A.proc_data_dict['quantities_of_interest']['Optimal_parameter_Y'] + self.ro_pulse_length(optimal_pulse_duration) + self.ro_pulse_amp(optimal_pulse_amplitude) + else: + A = ma.TwoD_Analysis(label='RO_duration_tuneup', auto=True) + return True + + def calibrate_ssro_fine(self, MC=None, + nested_MC=None, + start_freq=None, + start_amp=None, + start_freq_step=None, + start_amp_step=None, + threshold: float = .99, + analyze: bool = True, + update: bool = True): + ''' + Runs an optimizer routine on the SSRO assignment fidelity of the + .ro_freq and .ro_pulse_amp parameters. + Intended to be used in the "SSRO Optimization" node of GBT. + + Args: + start_freq (float): + Starting frequency of the optmizer. + + start_amp (float): + Starting amplitude of the optimizer. + + start_freq_step (float): + Starting frequency step of the optmizer. + + start_amp_step (float): + Starting amplitude step of the optimizer. + + threshold (float): + Fidelity thershold after which the optimizer stops iterating. 
+ ''' + + if MC is None: + MC = self.instr_MC.get_instr() + + if nested_MC is None: + nested_MC = self.instr_nested_MC.get_instr() + + if start_freq_step is None: + if start_freq is None: + start_freq = self.ro_freq() + start_freq_step = 0.1e6 + else: + raise ValueError('Must provide start frequency step if start\ + frequency is specified.') + + if start_amp_step is None: + if start_amp is None: + start_amp = self.ro_pulse_amp() + start_amp_step = 0.01 + else: + raise ValueError('Must provide start amplitude step if start\ + amplitude is specified.') + if start_amp is None: + start_amp = self.ro_freq() + if start_amp is None: + start_amp = self.ro_pulse_amp() + + nested_MC.set_sweep_functions([self.ro_freq, self.ro_pulse_amp]) + + d = det.Function_Detector(self.calibrate_optimal_weights, + result_keys=['F_a'], + value_names=['F_a'], + value_units=['a.u.']) + nested_MC.set_detector_function(d) + + ad_func_pars = {'adaptive_function': nelder_mead, + 'x0': [self.ro_freq(), self.ro_pulse_amp()], + 'initial_step': [start_freq_step, start_amp_step], + 'no_improve_break': 10, + 'minimize': False, + 'maxiter': 20, + 'f_termination': threshold} + nested_MC.set_adaptive_function_parameters(ad_func_pars) + + nested_MC.set_optimization_method('nelder_mead') + nested_MC.run(name='RO_fine_tuneup', mode='adaptive') + + if analyze is True: + ma.OptimizationAnalysis(label='RO_fine_tuneup') + return True + + def calibrate_ro_acq_delay(self, MC=None, + analyze: bool = True, + prepare: bool = True, + disable_metadata: bool = False): + """ + Calibrates the ro_acq_delay parameter for the readout. + For that it analyzes the transients. 
+ + """ + + self.ro_acq_delay(0) # set delay to zero + old_pow = self.ro_pulse_amp() + self.ro_pulse_amp(0.5) + + if MC is None: + MC = self.instr_MC.get_instr() + # if plot_max_time is None: + # plot_max_time = self.ro_acq_integration_length()+250e-9 + + if prepare: + self.prepare_for_timedomain() + p = sqo.off_on( + qubit_idx=self.cfg_qubit_nr(), pulse_comb='off', + initialize=False, + platf_cfg=self.cfg_openql_platform_fn()) + self.instr_CC.get_instr().eqasm_program(p.filename) + else: + p = None # object needs to exist for the openql_sweep to work + + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr(), + parameter_name='Transient time', unit='s', + upload=prepare) + MC.set_sweep_function(s) + + if 'UHFQC' in self.instr_acquisition(): + sampling_rate = 1.8e9 + else: + raise NotImplementedError() + + MC.set_sweep_points(np.arange(self.input_average_detector.nr_samples) / + sampling_rate) + MC.set_detector_function(self.input_average_detector) + MC.run(name='Measure_Acq_Delay_{}'.format(self.msmt_suffix), + disable_snapshot_metadata=disable_metadata) + + self.ro_pulse_amp(old_pow) + + if analyze: + a = ma2.RO_acquisition_delayAnalysis(qubit_name=self.name) + # Delay time is averaged over the two quadratures. + delay_time = (a.proc_data_dict['I_pulse_start'] + + a.proc_data_dict['Q_pulse_start'])/2 + self.ro_acq_delay(delay_time) + return True + + def calibrate_optimal_weights( + self, + MC=None, + f_state: bool = False, + verify: bool = True, + analyze: bool = True, + update: bool = True, + no_figs: bool = False, + optimal_IQ: bool = False, + prepare: bool = True, + disable_metadata: bool = False, + nr_shots_per_case: int = 2**13, + post_select: bool = False, + averages: int = 2**15, + post_select_threshold: float = None, + ) -> bool: + """ + Measures readout transients for the qubit in ground and excited state to indicate + at what times the transients differ. 
Based on the transients calculates weights + that are used to weigh measuremet traces to maximize the SNR. + + Args: + optimal_IQ (bool): + if set to True sets both the I and Q weights of the optimal + weight functions for the verification experiment. + A good sanity check is that when using optimal IQ one expects + to see no signal in the Q quadrature of the verification + SSRO experiment. + verify (bool): + indicates whether to run measure_ssro at the end of the routine + to find the new SNR and readout fidelities with optimized weights + + update (bool): + specifies whether to update the weights in the qubit object + """ + log.info('Calibrating optimal weights for {}'.format(self.name)) + if MC is None: + MC = self.instr_MC.get_instr() + + # Ensure that enough averages are used to get accurate weights + old_avg = self.ro_acq_averages() + self.ro_acq_averages(averages) + + if prepare: + self.prepare_for_timedomain() + + if f_state: + cases = ['off', 'on', 'two'] + else: + cases = ['off', 'on'] + transients = self.measure_transients( + MC=MC, + analyze=analyze, + cases=cases, + depletion_analysis=False, + prepare=False, + disable_metadata=disable_metadata) + + if analyze: + # Old analysis + # ma.Input_average_analysis(IF=self.ro_freq_mod()) + # New analysis 202208 (Jorge) + fp_off = ma.a_tools.latest_data(f'transients_{self.name}_0') + fp_on = ma.a_tools.latest_data(f'transients_{self.name}_1') + ts_off = '_'.join([fp_off.split('\\')[-2], + fp_off.split('\\')[-1].split('_')[0]]) + ts_on = '_'.join([fp_on.split('\\')[-2], + fp_on.split('\\')[-1].split('_')[0]]) + # Get input readout waveform + ro_lm = self.instr_LutMan_RO.get_instr() + pulse_id = ro_lm.pulse_type()+f'_R{self.cfg_qubit_nr()}' + ro_waveform = ro_lm._wave_dict[pulse_id] + if f_state: + fp_two = ma.a_tools.latest_data(f'transients_{self.name}_2') + ts_two = '_'.join([fp_two.split('\\')[-2], + fp_two.split('\\')[-1].split('_')[0]]) + a = ma2.ra.Optimal_integration_weights_analysis( + t_start=ts_off, 
t_stop=ts_two, + IF=self.ro_freq_mod(), input_waveform=ro_waveform) + else: + a = ma2.ra.Optimal_integration_weights_analysis( + t_start=ts_off, t_stop=ts_on, + IF=self.ro_freq_mod(), input_waveform=ro_waveform) + + self.ro_acq_averages(old_avg) + + if update: + assert analyze == True + self.ro_acq_weight_func_I(np.array(a.qoi['Weights_I_s'])) + self.ro_acq_weight_func_Q(np.array(a.qoi['Weights_Q_s'])) + if f_state: + self.ro_acq_weight_func_I_2(np.array(a.qoi['Weights_I_ef_s'])) + self.ro_acq_weight_func_Q_2(np.array(a.qoi['Weights_Q_ef_s'])) + if optimal_IQ or f_state: + self.ro_acq_weight_type('optimal IQ') + else: + self.ro_acq_weight_type('optimal') + if verify: + self._prep_ro_integration_weights() + self._prep_ro_instantiate_detectors() + ssro_dict = self.measure_ssro( + f_state=f_state, + no_figs=no_figs, + update=update, + prepare=True, + disable_metadata=disable_metadata, + nr_shots_per_case=nr_shots_per_case, + post_select=post_select) + return ssro_dict + if verify: + log.warning('Not verifying as settings were not updated.') + return True + + ##################################################### + # "measure_" methods below + ##################################################### + def measure_heterodyne_spectroscopy(self, freqs, MC=None, + analyze=True, close_fig=True, + label='', + prepare_for_continuous_wave: bool = True, + disable_metadata: bool = True, + ): + """ + Measures a transmission through the feedline as a function of frequency. + Usually used to find and characterize the resonators in routines such as + find_resonators or find_resonator_frequency. 
+ + Args: + freqs (array): + list of frequencies to sweep over + + analyze (bool): + indicates whether to perform a hanger model + fit to the data + + label (str): + suffix to append to the measurement label + """ + UHFQC = self.instr_acquisition.get_instr() + if prepare_for_continuous_wave: + self.prepare_for_continuous_wave() + if MC is None: + MC = self.instr_MC.get_instr() + # Starting specmode if set in config + if self.cfg_spec_mode(): + UHFQC.spec_mode_on(acq_length=self.ro_acq_integration_length(), + IF=self.ro_freq_mod(), + ro_amp=self.ro_pulse_amp_CW()) + # Snippet here to create and upload the CCL instructions + CCL = self.instr_CC.get_instr() + CCL.stop() + p = sqo.CW_RO_sequence(qubit_idx=self.cfg_qubit_nr(), + platf_cfg=self.cfg_openql_platform_fn()) + CCL.eqasm_program(p.filename) + # CCL gets started in the int_avg detector + + MC.set_sweep_function(swf.Heterodyne_Frequency_Sweep_simple( + MW_LO_source=self.instr_LO_ro.get_instr(), + IF=self.ro_freq_mod())) + MC.set_sweep_points(freqs) + + self.int_avg_det_single._set_real_imag(False) + MC.set_detector_function(self.int_avg_det_single) + MC.run(name='Resonator_scan'+self.msmt_suffix+label, disable_snapshot_metadata=disable_metadata) + # Stopping specmode + if self.cfg_spec_mode(): + UHFQC.spec_mode_off() + self._prep_ro_pulse(upload=True) + if analyze: + ma.Homodyne_Analysis(label=self.msmt_suffix, close_fig=close_fig) + + def measure_resonator_power(self, freqs, powers, MC=None, + analyze: bool = True, close_fig: bool = True, + label: str = ''): + """ + Mesures the readout resonator with UHFQC as a function of the pulse power. + The pulse power is controlled by changing the amplitude of the UHFQC-generated + waveform. + + Args: + freqs (array): + list of freqencies to sweep over + + powers (array): + powers of the readout pulse to sweep over. The power is adjusted + by changing the amplitude of the UHFQC output channels. Thereby + the range of powers is limited by the dynamic range of mixers. 
+ """ + self.prepare_for_continuous_wave() + if MC is None: + MC = self.instr_MC.get_instr() + # Snippet here to create and upload the CCL instructions + CCL = self.instr_CC.get_instr() + CCL.stop() + p = sqo.CW_RO_sequence(qubit_idx=self.cfg_qubit_nr(), + platf_cfg=self.cfg_openql_platform_fn()) + CCL.eqasm_program(p.filename) + # CCL gets started in the int_avg detector + + MC.set_sweep_function(swf.Heterodyne_Frequency_Sweep_simple( + MW_LO_source=self.instr_LO_ro.get_instr(), + IF=self.ro_freq_mod())) + MC.set_sweep_points(freqs) + + ro_lm = self.instr_LutMan_RO.get_instr() + m_amp_par = ro_lm.parameters[ + 'M_amp_R{}'.format(self.cfg_qubit_nr())] + s2 = swf.lutman_par_dB_attenuation_UHFQC_dig_trig( + LutMan=ro_lm, LutMan_parameter=m_amp_par) + MC.set_sweep_function_2D(s2) + MC.set_sweep_points_2D(powers) + self.int_avg_det_single._set_real_imag(False) + MC.set_detector_function(self.int_avg_det_single) + MC.run(name='Resonator_power_scan'+self.msmt_suffix+label, mode='2D') + if analyze: + ma.TwoD_Analysis(label='Resonator_power_scan', + close_fig=close_fig, normalize=True) + + def measure_photon_number_splitting(self, freqs, powers, MC=None, + analyze: bool = True, close_fig: bool = True): + """ + Mesures the CW qubit spectrosopy as a function of the RO pulse power + to find a photon splitting. + + Refs: + Schuster Nature 445, 515–518 (2007) + (note that in the paper RO resonator has lower frequency than the qubit) + + Args: + freqs (array): + list of freqencies to sweep over + + powers (array): + powers of the readout pulse to sweep over. The power is adjusted + by changing the amplitude of the UHFQC output channels. Thereby + the range of powers is limited by the dynamic range of mixers. 
+ """ + + self.prepare_for_continuous_wave() + if MC is None: + MC = self.instr_MC.get_instr() + # Snippet here to create and upload the CCL instructions + CCL = self.instr_CC.get_instr() + CCL.stop() + p = sqo.CW_RO_sequence(qubit_idx=self.cfg_qubit_nr(), + platf_cfg=self.cfg_openql_platform_fn()) + CCL.eqasm_program(p.filename) + # CCL gets started in the int_avg detector + spec_source = self.instr_spec_source.get_instr() + spec_source.on() + MC.set_sweep_function(spec_source.frequency) + MC.set_sweep_points(freqs) + + ro_lm = self.instr_LutMan_RO.get_instr() + m_amp_par = ro_lm.parameters[ + 'M_amp_R{}'.format(self.cfg_qubit_nr())] + s2 = swf.lutman_par_dB_attenuation_UHFQC_dig_trig( + LutMan=ro_lm, LutMan_parameter=m_amp_par) + MC.set_sweep_function_2D(s2) + MC.set_sweep_points_2D(powers) + self.int_avg_det_single._set_real_imag(False) + MC.set_detector_function(self.int_avg_det_single) + label = 'Photon_number_splitting' + MC.run(name=label+self.msmt_suffix, mode='2D') + spec_source.off() + if analyze: + ma.TwoD_Analysis(label=label, + close_fig=close_fig, normalize=True) + + def measure_resonator_frequency_dac_scan(self, freqs, dac_values, MC=None, + analyze: bool = True, close_fig: bool = True, + fluxChan=None, label=''): + """ + Performs the resonator spectroscopy as a function of the current applied + to the flux bias line. + + Args: + freqs (array): + list of freqencies to sweep over + + dac_values (array): + list of the DAC values (current values) to sweep over + + fluxChan (str): + channel of the instrument controlling the flux to sweep. By default + the channel used is specified by self.fl_dc_ch. 
+ + analyze (bool): + indicates whether to generate colormaps of the measured data + + label (str): + suffix to append to the measurement label + + Relevant qubit parameters: + instr_FluxCtrl (str): + instrument controlling the current bias + + fluxChan (str): + chanel of the flux control instrument corresponding to the qubit + """ + self.prepare_for_continuous_wave() + if MC is None: + MC = self.instr_MC.get_instr() + # Snippet here to create and upload the CCL instructions + CCL = self.instr_CC.get_instr() + CCL.stop() + p = sqo.CW_RO_sequence(qubit_idx=self.cfg_qubit_nr(), + platf_cfg=self.cfg_openql_platform_fn()) + CCL.eqasm_program(p.filename) + # CCL gets started in the int_avg detector + + MC.set_sweep_function(swf.Heterodyne_Frequency_Sweep_simple( + MW_LO_source=self.instr_LO_ro.get_instr(), + IF=self.ro_freq_mod())) + MC.set_sweep_points(freqs) + + if 'ivvi' in self.instr_FluxCtrl().lower(): + IVVI = self.instr_FluxCtrl.get_instr() + dac_par = IVVI.parameters['dac{}'.format(self.fl_dc_ch())] + else: + # Assume the flux is controlled using an SPI rack + fluxcontrol = self.instr_FluxCtrl.get_instr() + if fluxChan == None: + dac_par = fluxcontrol.parameters[(self.fl_dc_ch())] + else: + dac_par = fluxcontrol.parameters[(fluxChan)] + + MC.set_sweep_function_2D(dac_par) + MC.set_sweep_points_2D(dac_values) + self.int_avg_det_single._set_real_imag(False) + MC.set_detector_function(self.int_avg_det_single) + MC.run(name='Resonator_dac_scan'+self.msmt_suffix+label, mode='2D') + if analyze: + ma.TwoD_Analysis(label='Resonator_dac_scan', close_fig=close_fig) + + def measure_qubit_frequency_dac_scan(self, freqs, dac_values, + mode='pulsed_marked', MC=None, + analyze=True, fluxChan=None, close_fig=True, + nested_resonator_calibration=False, + nested_resonator_calibration_use_min=False, + resonator_freqs=None, + trigger_idx= None, + update=True): + """ + Performs the qubit spectroscopy while changing the current applied + to the flux bias line. 
+ + Args: + freqs (array): + MW drive frequencies to sweep over + + dac_values (array): + values of the current to sweep over + + mode (str {'pulsed_mixer', 'CW', 'pulsed_marked'}): + specifies the spectroscopy mode (cf. measure_spectroscopy method) + + fluxChan (str): + Fluxchannel that is varied. Defaults to self.fl_dc_ch + + nested_resonator_calibration (bool): + specifies whether to track the RO resonator + frequency (which itself is flux-dependent) + + nested_resonator_calibration_use_min (bool): + specifies whether to use the resonance + minimum in the nested routine + + resonator_freqs (array): + manual specifications of the frequencies over in which to + search for RO resonator in the nested routine + + analyze (bool): + indicates whether to generate colormaps of the measured data + + label (str): + suffix to append to the measurement label + + Relevant qubit parameters: + instr_FluxCtrl (str): + instrument controlling the current bias + + fluxChan (str): + chanel of the flux control instrument corresponding to the qubit + """ + + if mode == 'pulsed_mixer': + old_channel_amp = self.mw_channel_amp() + self.mw_channel_amp(1) + self.prepare_for_timedomain() + self.mw_channel_amp(old_channel_amp) + elif mode == 'CW' or mode == 'pulsed_marked': + self.prepare_for_continuous_wave() + else: + log.error('Mode {} not recognized'.format(mode)) + if MC is None: + MC = self.instr_MC.get_instr() + if trigger_idx is None: + trigger_idx = self.cfg_qubit_nr() + + # Snippet here to create and upload the CCL instructions + CCL = self.instr_CC.get_instr() + if mode == 'pulsed_marked': + p = sqo.pulsed_spec_seq_marked( + qubit_idx=self.cfg_qubit_nr(), + spec_pulse_length=self.spec_pulse_length(), + platf_cfg=self.cfg_openql_platform_fn(), + trigger_idx=trigger_idx) + else: + p = sqo.pulsed_spec_seq( + qubit_idx=self.cfg_qubit_nr(), + spec_pulse_length=self.spec_pulse_length(), + platf_cfg=self.cfg_openql_platform_fn()) + + CCL.eqasm_program(p.filename) + # CCL gets started in 
the int_avg detector + if 'ivvi' in self.instr_FluxCtrl().lower(): + if fluxChan is None: + IVVI = self.instr_FluxCtrl.get_instr() + dac_par = IVVI.parameters['dac{}'.format(self.fl_dc_ch())] + else: + dac_par = IVVI.parameters[fluxChan] + elif 'AWG8' in self.instr_FluxCtrl(): + # Biasing directly through HDAWG + HDAWG = self.instr_FluxCtrl.get_instr() + dac_par = HDAWG.parameters[self.fl_dc_ch()] + fluxcontrol = self.instr_FluxCtrl.get_instr() + else: + # Assume the flux is controlled using an SPI rack + fluxcontrol = self.instr_FluxCtrl.get_instr() + if fluxChan == None: + dac_par = fluxcontrol.parameters[(self.fl_dc_ch())] + else: + dac_par = fluxcontrol.parameters[(fluxChan)] + if mode == 'pulsed_mixer': + spec_source = self.instr_spec_source_2.get_instr() + spec_source.on() + else: + spec_source = self.instr_spec_source.get_instr() + spec_source.on() + # if mode == 'pulsed_marked': + # spec_source.pulsemod_state('On') + + MC.set_sweep_function(spec_source.frequency) + MC.set_sweep_points(freqs) + if nested_resonator_calibration: + res_updating_dac_par = swf.Nested_resonator_tracker( + qubit=self, + nested_MC=self.instr_nested_MC.get_instr(), + freqs=resonator_freqs, + par=dac_par, use_min=nested_resonator_calibration_use_min, + reload_sequence=True, sequence_file=p, cc=CCL) + MC.set_sweep_function_2D(res_updating_dac_par) + else: + MC.set_sweep_function_2D(dac_par) + MC.set_sweep_points_2D(dac_values) + self.int_avg_det_single._set_real_imag(False) + self.int_avg_det_single.always_prepare = True + MC.set_detector_function(self.int_avg_det_single) + MC.run(name='Qubit_dac_scan'+self.msmt_suffix, mode='2D') + if analyze: + ma.TwoD_Analysis(label='Qubit_dac_scan', + close_fig=close_fig) + a = ma2.da.DAC_analysis() + if update: + self.fl_dc_I0(a.dac_fit_res['sweetspot_dac']) + fluxcontrol.set(self.fl_dc_ch(), self.fl_dc_I0()) + + def measure_spectroscopy(self, freqs, mode='pulsed_marked', MC=None, + analyze=True, close_fig=True, label='', + 
prepare_for_continuous_wave=True, + disable_metadata=False): + """ + Performs a two-tone spectroscopy experiment where one tone is kept + fixed at the resonator readout frequency and another frequency is swept. + + args: + freqs (array) : Frequency range you want to sweep + mode (string): 'CW' - Continuous wave + 'pulsed_marked' - pulsed using trigger input of + spec source + 'pulsed_mixer' - pulsed using AWG and mixer + analyze: indicates whether to look for the peak in the data + and perform a fit + label: suffix to append to the measurement label + + This experiment can be performed in three different modes + Continuous wave (CW) + Pulsed, marker modulated + Pulsed, mixer modulated + + The mode argument selects which mode is being used and redirects the + arguments to the appropriate method. + """ + if mode == 'CW': + self.measure_spectroscopy_CW(freqs=freqs, MC=MC, + analyze=analyze, close_fig=close_fig, + label=label, + prepare_for_continuous_wave=prepare_for_continuous_wave, + disable_metadata=disable_metadata) + elif mode == 'pulsed_marked': + self.measure_spectroscopy_pulsed_marked( + freqs=freqs, MC=MC, + analyze=analyze, close_fig=close_fig, + label=label, + prepare_for_continuous_wave=prepare_for_continuous_wave, + disable_metadata=disable_metadata) + elif mode == 'pulsed_mixer': + self.measure_spectroscopy_pulsed_mixer( + freqs=freqs, MC=MC, + analyze=analyze, close_fig=close_fig, + label=label, + prepare_for_timedomain=prepare_for_continuous_wave, + disable_metadata=disable_metadata) + else: + log.error('Mode {} not recognized. Available modes: "CW", \ + "pulsed_marked", "pulsed_mixer"'.format(mode)) + + def measure_spectroscopy_CW(self, freqs, MC=None, + analyze=True, close_fig=True, label='', + prepare_for_continuous_wave=True, + disable_metadata=False): + """ + Does a CW spectroscopy experiment by sweeping the frequency of a + microwave source. 
+ + Relevant qubit parameters: + instr_spec_source (RohdeSchwarz_SGS100A): + instrument used to apply CW excitation + + spec_pow (float): + power of the MW excitation at the output of the spec_source (dBm) + + label (str): + suffix to append to the measurement label + """ + UHFQC = self.instr_acquisition.get_instr() + if prepare_for_continuous_wave: + self.prepare_for_continuous_wave() + if MC is None: + MC = self.instr_MC.get_instr() + + # Starting specmode if set in config + if self.cfg_spec_mode(): + UHFQC.spec_mode_on(IF=self.ro_freq_mod(), + ro_amp=self.ro_pulse_amp_CW()) + + # Snippet here to create and upload the CCL instructions + CCL = self.instr_CC.get_instr() + p = sqo.pulsed_spec_seq( + qubit_idx=self.cfg_qubit_nr(), + spec_pulse_length=self.spec_pulse_length(), + platf_cfg=self.cfg_openql_platform_fn()) + CCL.eqasm_program(p.filename) + # CCL gets started in the int_avg detector + + spec_source = self.instr_spec_source.get_instr() + spec_source.on() + # Set marker mode off for CW: + if not spec_source.get_idn()['model']=='E8257D': + spec_source.pulsemod_state('Off') + + MC.set_sweep_function(spec_source.frequency) + MC.set_sweep_points(freqs) + if self.cfg_spec_mode(): + print('Enter loop') + MC.set_detector_function(self.UHFQC_spec_det) + else: + self.int_avg_det_single._set_real_imag(False) + MC.set_detector_function(self.int_avg_det_single) + MC.run(name='CW_spectroscopy'+self.msmt_suffix+label, disable_snapshot_metadata=disable_metadata) + # Stopping specmode + if self.cfg_spec_mode(): + UHFQC.spec_mode_off() + self._prep_ro_pulse(upload=True) + if analyze: + ma.Homodyne_Analysis(label=self.msmt_suffix, close_fig=close_fig) + + def measure_spectroscopy_pulsed_marked(self, freqs, MC=None, + analyze=True, close_fig=True, + label='', + prepare_for_continuous_wave=True, + trigger_idx = None, + disable_metadata=False): + """ + Performs a spectroscopy experiment by triggering the spectroscopy source + with a CCLight trigger. 
+ + TODO: set the + """ + UHFQC = self.instr_acquisition.get_instr() + if prepare_for_continuous_wave: + self.prepare_for_continuous_wave() + if MC is None: + MC = self.instr_MC.get_instr() + + # Starting specmode if set in config + if self.cfg_spec_mode(): + UHFQC.spec_mode_on(IF=self.ro_freq_mod(), + ro_amp=self.ro_pulse_amp_CW()) + + wait_time_ns = self.spec_wait_time()*1e9 + + if trigger_idx is None: + trigger_idx = self.cfg_qubit_nr() + + # Snippet here to create and upload the CCL instructions + CCL = self.instr_CC.get_instr() + p = sqo.pulsed_spec_seq_marked( + qubit_idx=self.cfg_qubit_nr(), + spec_pulse_length=self.spec_pulse_length(), + platf_cfg=self.cfg_openql_platform_fn(), + cc=self.instr_CC(), + trigger_idx=trigger_idx if (CCL.name.upper() == 'CCL' or CCL.name.upper() == 'CC') else 15, + wait_time_ns=wait_time_ns) + + CCL.eqasm_program(p.filename) + # CCL gets started in the int_avg detector + + spec_source = self.instr_spec_source.get_instr() + spec_source.on() + # Set marker mode off for CW: + spec_source.pulsemod_state('On') + + MC.set_sweep_function(spec_source.frequency) + MC.set_sweep_points(freqs) + if self.cfg_spec_mode(): + MC.set_detector_function(self.UHFQC_spec_det) + else: + self.int_avg_det_single._set_real_imag(False) + MC.set_detector_function(self.int_avg_det_single) + MC.run(name='pulsed_marker_spectroscopy'+self.msmt_suffix+label, disable_snapshot_metadata=disable_metadata) + # Stopping specmode + if self.cfg_spec_mode(): + UHFQC.spec_mode_off() + self._prep_ro_pulse(upload=True) + if analyze and not disable_metadata: + ma.Qubit_Spectroscopy_Analysis(label=self.msmt_suffix, + close_fig=close_fig, + qb_name=self.name) + + def measure_spectroscopy_pulsed_mixer(self, freqs, MC=None, + analyze=True, close_fig=True, + label='', + prepare_for_timedomain=True, + disable_metadata=False): + """ + Performs pulsed spectroscopy by modulating a cw pulse with a square + which is generated by an AWG. 
Uses the self.mw_LO as spec source, as + that usually is the LO of the AWG/QWG mixer. + + Is considered as a time domain experiment as it utilizes the AWG + + Relevant parameters: + spec_pow (float): + power of the LO fed into the mixer + + spec_amp (float): + amplitude of the square waveform used to generate + microwave tone + + spec_pulse_length (float): + length of the spectroscopy pulse. The length is + controlled by the qisa file, which indicates how many 20 ns long + square pulses should be triggered back-to-back + """ + UHFQC = self.instr_acquisition.get_instr() + if MC is None: + MC = self.instr_MC.get_instr() + + # Starting specmode if set in config + if self.cfg_spec_mode(): + UHFQC.spec_mode_on(IF=self.ro_freq_mod(), + ro_amp=self.ro_pulse_amp_CW()) + + # Save current value of mw_channel_amp to make this measurement + # independent of the value. + old_channel_amp = self.mw_channel_amp() + self.mw_channel_amp(1) + + if prepare_for_timedomain: + self.prepare_for_timedomain() + # Snippet here to create and upload the CCL instructions + CCL = self.instr_CC.get_instr() + p = sqo.pulsed_spec_seq( + qubit_idx=self.cfg_qubit_nr(), + spec_pulse_length=self.spec_pulse_length(), + platf_cfg=self.cfg_openql_platform_fn()) + + CCL.eqasm_program(p.filename) + # CCL gets started in the int_avg detector + + spec_source = self.instr_spec_source_2.get_instr() + # spec_source.on() + # Set marker mode off for mixer CW: + + MC.set_sweep_function(spec_source.frequency) + MC.set_sweep_points(freqs) + + if self.cfg_spec_mode(): + print('Enter loop') + MC.set_detector_function(self.UHFQC_spec_det) + else: + self.int_avg_det_single._set_real_imag(False) + MC.set_detector_function(self.int_avg_det_single) + + # d = self.int_avg_det + # MC.set_detector_function(d) + MC.run(name='pulsed_mixer_spectroscopy'+self.msmt_suffix+label, disable_snapshot_metadata=disable_metadata) + + self.mw_channel_amp(old_channel_amp) + # Stopping specmode + if self.cfg_spec_mode(): + 
UHFQC.spec_mode_off() + self._prep_ro_pulse(upload=True) + if analyze: + ma.Qubit_Spectroscopy_Analysis(label=self.msmt_suffix, + close_fig=close_fig, + qb_name=self.name) + + def find_bus_frequency(self, freqs, spec_source_bus, bus_power, f01=None, + label='', close_fig=True, analyze=True, MC=None, + prepare_for_continuous_wave=True): + """ + Drive the qubit and sit at the spectroscopy peak while the bus is driven with + bus_spec_source + + Args: + freqs (array): + list of frequencies of the second drive tone (at bus frequency) + + spec_source_bus (RohdeSchwarz_SGS100A): + rf source used for the second spectroscopy tone + + bus_power (float): + power of the second spectroscopy tone + + f_01 (float): + frequency of 01 transition (default: self.freq_qubit()) + + analyze (bool): + indicates whether to look for peas in the data and perform a fit + + label (str): + suffix to append to the measurement label + + prepare_for_continuous_wave (bool): + indicates whether to regenerate a waveform + generating a readout tone and set all the instruments according + to the parameters stored in the qubit object + """ + + if f01 is None: + f01 = self.freq_qubit() + + UHFQC = self.instr_acquisition.get_instr() + if prepare_for_continuous_wave: + self.prepare_for_continuous_wave() + if MC is None: + MC = self.instr_MC.get_instr() + # Starting specmode if set in config + if self.cfg_spec_mode(): + UHFQC.spec_mode_on(IF=self.ro_freq_mod(), + ro_amp=self.ro_pulse_amp_CW()) + + # Snippet here to create and upload the CCL instructions + CCL = self.instr_CC.get_instr() + p = sqo.pulsed_spec_seq( + qubit_idx=self.cfg_qubit_nr(), + spec_pulse_length=self.spec_pulse_length(), + platf_cfg=self.cfg_openql_platform_fn()) + CCL.eqasm_program(p.filename) + # CCL gets started in the int_avg detector + + spec_source = self.instr_spec_source.get_instr() + spec_source.on() + spec_source.frequency(f01) + # spec_source.power(self.spec_pow()) + spec_source_bus.on() + spec_source_bus.power(bus_power) + 
MC.set_sweep_function(spec_source_bus.frequency) + MC.set_sweep_points(freqs) + if self.cfg_spec_mode(): + print('Enter loop') + MC.set_detector_function(self.UHFQC_spec_det) + else: + self.int_avg_det_single._set_real_imag(False) + MC.set_detector_function(self.int_avg_det_single) + MC.run(name='Bus_spectroscopy_'+self.msmt_suffix+label) + spec_source_bus.off() + # Stopping specmode + if self.cfg_spec_mode(): + UHFQC.spec_mode_off() + self._prep_ro_pulse(upload=True) + if analyze: + ma.Qubit_Spectroscopy_Analysis(label=self.msmt_suffix, + close_fig=close_fig, + qb_name=self.name) + + def bus_frequency_flux_sweep(self, freqs, spec_source_bus, bus_power, dacs, dac_param, f01=None, label='', + close_fig=True, analyze=True, MC=None, + prepare_for_continuous_wave=True): + """ + Drive the qubit and sit at the spectroscopy peak while the bus is driven with + bus_spec_source. At the same time sweep dac channel specified by dac_param over + set of values sepcifeid by dacs. + + Practical comments: + - sweep flux bias of different (neighbour) qubit than the one measured + - set spec_power of the first tone high (say, +15 dB relative to value optimal + for sharp spectroscopy). This makes you less sensitive to flux crosstalk. 
+ + Args: + freqs (array): + list of frequencies of the second drive tone (at bus frequency) + + spec_source_bus (RohdeSchwarz_SGS100A): + rf source used for the second spectroscopy tone + + bus_power (float): + power of the second spectroscopy tone + + dacs (array): + valuses of current bias to measure + + dac_param (str): + parameter corresponding to the sweeped current bias + + f_01 (flaot): + frequency of 01 transition (default: self.freq_qubit()) + + analyze (bool): + indicates whether to look for peas in the data and perform a fit + + label (bool): + suffix to append to the measurement label + + prepare_for_continuous_wave (bool): + indicates whether to regenerate a waveform + generating a readout tone and set all the instruments according + to the parameters stored in the qubit object + """ + if f01 == None: + f01 = self.freq_qubit() + + UHFQC = self.instr_acquisition.get_instr() + if prepare_for_continuous_wave: + self.prepare_for_continuous_wave() + if MC is None: + MC = self.instr_MC.get_instr() + # Starting specmode if set in config + if self.cfg_spec_mode(): + UHFQC.spec_mode_on(IF=self.ro_freq_mod(), + ro_amp=self.ro_pulse_amp_CW()) + + # Snippet here to create and upload the CCL instructions + CCL = self.instr_CC.get_instr() + p = sqo.pulsed_spec_seq( + qubit_idx=self.cfg_qubit_nr(), + spec_pulse_length=self.spec_pulse_length(), + platf_cfg=self.cfg_openql_platform_fn()) + CCL.eqasm_program(p.filename) + # CCL gets started in the int_avg detector + + spec_source = self.instr_spec_source.get_instr() + spec_source.on() + spec_source.frequency(f01) + # spec_source.power(self.spec_pow()) + spec_source_bus.on() + spec_source_bus.power(bus_power) + + MC.set_sweep_function(spec_source_bus.frequency) + MC.set_sweep_points(freqs) + + MC.set_sweep_function_2D(dac_param) + MC.set_sweep_points_2D(dacs) + + if self.cfg_spec_mode(): + print('Enter loop') + MC.set_detector_function(self.UHFQC_spec_det) + else: + self.int_avg_det_single._set_real_imag(False) + 
MC.set_detector_function(self.int_avg_det_single) + MC.run(name='Bus_flux_sweep_'+self.msmt_suffix+label, mode='2D') + spec_source_bus.off() + + # Stopping specmode + if self.cfg_spec_mode(): + UHFQC.spec_mode_off() + self._prep_ro_pulse(upload=True) + if analyze: + ma.TwoD_Analysis(label=self.msmt_suffix, close_fig=close_fig) + + def measure_anharmonicity(self, + freqs_01=None, freqs_12=None, + f_01_power=None, f_12_power=None, + MC=None, spec_source_2=None, + mode='pulsed_marked',step_size:int= 1e6): + """ + Measures the qubit spectroscopy as a function of frequency of the two + driving tones. The qubit transitions are observed when frequency of one + drive matches the qubit frequency, or when sum of frequencies matches + energy difference between ground and second excited state. Consequently + frequency of 01 and 12 transitions can be extracted simultaneously + yoielding anharmonicity measurement. + + Typically a good guess for the 12 transition frequencies is + f01 + alpha where alpha is the anharmonicity and typically ~ -300 MHz + + Args: + freqs_01: frequencies of the first qubit drive + freqs_12: frequencies of the second qubit drive + f_01_power: power of the first qubit drive. By default the power + is set to self.spec_pow + f_12_power: power of the second qubit drive. By default the power + is set to self.spec_pow. Likely it needs to be increased + by 10-20 dB to yield meaningful result + spec_source_2: instrument used to apply second MW drive. + By default instrument specified by self.instr_spec_source_2 is used + mode (str): + if pulsed_marked uses pulsed spectroscopy sequence assuming + that the sources are pulsed using a marker. + Otherwise, uses CW spectroscopy. 
+ """ + # f_anharmonicity = np.mean(freqs_01) - np.mean(freqs_12) + # if f_01_power == None: + # f_01_power = self.spec_pow() + # if f_12_power == None: + # f_12_power = f_01_power+20 + if freqs_01 is None: + freqs_01 = self.freq_qubit()+np.arange(-20e6, 20.1e6, step_size) + if freqs_12 is None: + freqs_12 = self.freq_qubit() + self.anharmonicity() + \ + np.arange(-20e6, 20.1e6, 1e6) + f_anharmonicity = np.mean(freqs_01) - np.mean(freqs_12) + if f_01_power == None: + f_01_power = self.spec_pow() + if f_12_power == None: + f_12_power = f_01_power+5 + print('f_anharmonicity estimation', f_anharmonicity) + print('f_12 estimations', np.mean(freqs_12)) + CCL = self.instr_CC.get_instr() + if mode == 'pulsed_marked': + p = sqo.pulsed_spec_seq_marked( + qubit_idx=self.cfg_qubit_nr(), + spec_pulse_length=self.spec_pulse_length(), + platf_cfg=self.cfg_openql_platform_fn(), + trigger_idx=0, + trigger_idx_2=9) + else: + p = sqo.pulsed_spec_seq( + qubit_idx=self.cfg_qubit_nr(), + spec_pulse_length=self.spec_pulse_length(), + platf_cfg=self.cfg_openql_platform_fn()) + CCL.eqasm_program(p.filename) + if MC is None: + MC = self.instr_MC.get_instr() + if spec_source_2 is None: + spec_source_2 = self.instr_spec_source_2.get_instr() + spec_source = self.instr_spec_source.get_instr() + + self.prepare_for_continuous_wave() + self.int_avg_det_single._set_real_imag(False) + spec_source.on() + if mode == 'pulsed_marked': + spec_source.pulsemod_state('On') + else: + spec_source.pulsemod_state('Off') + + spec_source.power(f_01_power) + + spec_source_2.on() + if mode == 'pulsed_marked': + spec_source_2.pulsemod_state('On') + else: + spec_source_2.pulsemod_state('Off') + spec_source_2.power(f_12_power) + + MC.set_sweep_function(wrap_par_to_swf( + spec_source.frequency, retrieve_value=True)) + MC.set_sweep_points(freqs_01) + MC.set_sweep_function_2D(wrap_par_to_swf( + spec_source_2.frequency, retrieve_value=True)) + MC.set_sweep_points_2D(freqs_12) + 
MC.set_detector_function(self.int_avg_det_single) + MC.run_2D(name='Two_tone_'+self.msmt_suffix) + ma.TwoD_Analysis(auto=True) + spec_source.off() + spec_source_2.off() + ma.Three_Tone_Spectroscopy_Analysis( + label='Two_tone', f01=np.mean(freqs_01), f12=np.mean(freqs_12)) + + def measure_anharmonicity_GBT(self, freqs_01=None, freqs_12=None, f_01_power=None, + f_12_power=None, + MC=None, spec_source_2=None, + mode='pulsed_marked'): + """ + Measures the qubit spectroscopy as a function of frequency of the two + driving tones. The qubit transitions are observed when frequency of one + drive matches the qubit frequency, or when sum of frequencies matches + energy difference between ground and second excited state. Consequently + frequency of 01 and 12 transitions can be extracted simultaneously + yoielding anharmonicity measurement. + + Typically a good guess for the 12 transition frequencies is + f01 + alpha where alpha is the anharmonicity and typically ~ -300 MHz + + Args: + freqs_01: frequencies of the first qubit drive + freqs_12: frequencies of the second qubit drive + f_01_power: power of the first qubit drive. By default the power + is set to self.spec_pow + f_12_power: power of the second qubit drive. By default the power + is set to self.spec_pow. Likely it needs to be increased + by 10-20 dB to yield meaningful result + spec_source_2: instrument used to apply second MW drive. + By default instrument specified by self.instr_spec_source_2 is used + mode (str): + if pulsed_marked uses pulsed spectroscopy sequence assuming + that the sources are pulsed using a marker. + Otherwise, uses CW spectroscopy. 
+ """ + if freqs_01 is None: + freqs_01 = self.freq_qubit()+np.arange(-30e6, 30.1e6, 0.5e6) + if freqs_12 is None: + freqs_12 = self.freq_qubit() + self.anharmonicity() + \ + np.arange(-30e6, 30.1e6, 0.5e6) + f_anharmonicity = np.mean(freqs_01) - np.mean(freqs_12) + if f_01_power == None: + f_01_power = self.spec_pow() + if f_12_power == None: + f_12_power = f_01_power+20 + + print('f_anharmonicity estimation', f_anharmonicity) + print('f_12 estimations', np.mean(freqs_12)) + CCL = self.instr_CC.get_instr() + p = sqo.pulsed_spec_seq_marked( + qubit_idx=self.cfg_qubit_nr(), + spec_pulse_length=self.spec_pulse_length(), + platf_cfg=self.cfg_openql_platform_fn(), + trigger_idx=0) + CCL.eqasm_program(p.filename) + if MC is None: + MC = self.instr_MC.get_instr() + if spec_source_2 is None: + spec_source_2 = self.instr_spec_source_2.get_instr() + spec_source = self.instr_spec_source.get_instr() + old_spec_pow = self.spec_pow() + + self.prepare_for_continuous_wave() + self.int_avg_det_single._set_real_imag(False) + spec_source.on() + if mode == 'pulsed_marked': + spec_source.pulsemod_state('On') + else: + spec_source.pulsemod_state('Off') + + spec_source.power(f_01_power) + + spec_source_2.on() + if mode == 'pulsed_marked': + spec_source_2.pulsemod_state('On') + else: + spec_source_2.pulsemod_state('Off') + spec_source_2.power(f_12_power) + + MC.set_sweep_function(wrap_par_to_swf( + spec_source.frequency, retrieve_value=True)) + MC.set_sweep_points(freqs_01) + MC.set_sweep_function_2D(wrap_par_to_swf( + spec_source_2.frequency, retrieve_value=True)) + MC.set_sweep_points_2D(freqs_12) + MC.set_detector_function(self.int_avg_det_single) + MC.run_2D(name='Two_tone_'+self.msmt_suffix) + ma.TwoD_Analysis(auto=True) + spec_source.off() + spec_source_2.off() + self.spec_pow(old_spec_pow) + + # if analyze: + # a = ma.Three_Tone_Spectroscopy_Analysis(label='Two_tone', f01=np.mean(freqs_01), f12=np.mean(freqs_12)) + # if update: + # self.anharmonicity(a.anharm) + # return a.T1 + + 
ma_obj = ma.Three_Tone_Spectroscopy_Analysis_test( + label='Two_tone', f01=np.mean(freqs_01), f12=np.mean(freqs_12)) + rel_change = (abs(self.anharmonicity()) - + ma_obj.Anharm_dict['anharmonicity'])/self.anharmonicity() + threshold_for_change = 0.1 + if np.abs(rel_change) > threshold_for_change: + return False + else: + return True + + def measure_photon_nr_splitting_from_bus(self, f_bus, freqs_01=None, + powers=np.arange(-10, 10, 1), MC=None, + spec_source_2=None): + """ + Measures photon splitting of the qubit due to photons in the bus resonators. + Specifically it is a CW qubit spectroscopy with the second variable-power CW tone + applied at frequency f_bus. + + Refs: + Schuster Nature 445, 515–518 (2007) + (note that in the paper RO resonator has lower frequency than the qubit) + + Args: + f_bus: bus frequency at which variable-power CW tone is applied + freqs_01: range of frequencies of the CW qubit MW drive. If not specified + range -60 MHz to +5 MHz around freq_qubit fill be used. + powers: sweeped powers of the bus CW drive. + spec_source_2: sepcifies instrument used to apply bus MW drive. By default + instr_spec_source_2 is used. 
+ """ + if freqs_01 is None: + freqs_01 = np.arange(self.freq_qubit()-60e6, + self.freq_qubit()+5e6, 0.7e6) + + self.prepare_for_continuous_wave() + if MC is None: + MC = self.instr_MC.get_instr() + CCL = self.instr_CC.get_instr() + if spec_source_2 is None: + spec_source_2 = self.instr_spec_source_2.get_instr() + spec_source = self.instr_spec_source.get_instr() + p = sqo.pulsed_spec_seq( + qubit_idx=self.cfg_qubit_nr(), + spec_pulse_length=self.spec_pulse_length(), + platf_cfg=self.cfg_openql_platform_fn()) + CCL.eqasm_program(p.filename) + self.int_avg_det_single._set_real_imag(False) + spec_source.on() + spec_source.power(self.spec_pow()) + spec_source_2.on() + spec_source_2.frequency(f_bus) + + MC.set_sweep_function(wrap_par_to_swf( + spec_source.frequency, retrieve_value=True)) + MC.set_sweep_points(freqs_01) + + MC.set_sweep_function_2D(wrap_par_to_swf( + spec_source_2.power, retrieve_value=True)) + MC.set_sweep_points_2D(powers) + MC.set_detector_function(self.int_avg_det_single) + + MC.run_2D(name='Photon_nr_splitting'+self.msmt_suffix) + + ma.TwoD_Analysis(auto=True) + spec_source.off() + spec_source_2.off() + + def measure_ssro( + self, + MC=None, + f_state: bool = False, + h_state: bool = False, + prepare: bool = True, + no_figs: bool = False, + post_select: bool = False, + update: bool = True, + nr_shots_per_case: int = 2**13, + post_select_threshold: float = None, + SNR_detector: bool = False, + vary_residual_excitation: bool = True, + disable_metadata: bool = False, + label: str = '' + ): + """ + Performs a number of single shot measurements with qubit in ground and excited state + to extract the SNR and readout fidelities. + + Args: + analyze (bool): + should the analysis be executed + + nr_shots_per_case (int): + total number of measurements in qubit ground and excited state + + prepare (bool): + should the prepare_for_timedomain be executed? 
+ + SNR_detector (bool): + the function will return a dictionary suitable, making this function + easier to use as a detector in the nested measurement + + shots_per_meas (int): + number of single shot measurements per single + acquisition with UHFQC + ... + """ + # off and on, not including post selection init measurements yet + nr_shots = 2 * nr_shots_per_case + if f_state: + nr_shots += nr_shots_per_case + if post_select: + nr_shots *= 2 + old_RO_digit = self.ro_acq_digitized() + self.ro_acq_digitized(False) + if MC is None: + MC = self.instr_MC.get_instr() + # plotting really slows down SSRO (16k shots plotting is slow) + old_plot_setting = MC.live_plot_enabled() + MC.live_plot_enabled(False) + if prepare: + # Set default lutmap to ensure rx12 is uploaded + if f_state: + mw_lm = self.instr_LutMan_MW.get_instr() + mw_lm.set_default_lutmap() + self.prepare_for_timedomain() + pulse_comb = 'off_on' + if f_state: + pulse_comb += '_two' + if h_state: + pulse_comb += '_three' + p = sqo.off_on( + qubit_idx=self.cfg_qubit_nr(), + pulse_comb=pulse_comb, + initialize=post_select, + platf_cfg=self.cfg_openql_platform_fn()) + self.instr_CC.get_instr().eqasm_program(p.filename) + # digitization setting is reset here but the detector still uses + # the disabled setting that was set above + self.ro_acq_digitized(old_RO_digit) + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr(), + parameter_name='Shot', unit='#', + upload=prepare) + MC.soft_avg(1) # don't want to average single shots + MC.set_sweep_function(s) + MC.set_sweep_points(np.arange(nr_shots)) + d = self.int_log_det + d.nr_shots = nr_shots + MC.set_detector_function(d) + if f_state: + label = 'f' + MC.run('SSRO_{}{}'.format(label, self.msmt_suffix), + disable_snapshot_metadata=disable_metadata) + MC.live_plot_enabled(old_plot_setting) + a = ma2.ra.Singleshot_Readout_Analysis( + qubit=self.name, + qubit_freq=self.freq_qubit(), + heralded_init=post_select, + f_state=f_state, + h_state=h_state, + 
extract_only=no_figs) + #################################### + # SSRO Analysis (Old and deprecated) + #################################### + # post_select_threshold = self.ro_acq_threshold() + # options_dict = {'post_select': post_select, + # 'nr_samples': 2+2*post_select, + # 'post_select_threshold': post_select_threshold, + # 'predict_qubit_temp': True, + # 'qubit_freq': self.freq_qubit()} + # if not vary_residual_excitation: + # options_dict.update( + # {'fixed_p10': self.res_exc, + # 'fixed_p01': self.mmt_rel}) + # a = ma2.ra.Singleshot_Readout_Analysis_old( + # options_dict=options_dict, + # extract_only=no_figs) + ###################################################################### + # Update parameters in the qubit object based on the analysis + ###################################################################### + if update: + self.ro_acq_threshold(a.proc_data_dict['threshold_raw']) + return {'SNR': a.qoi['SNR'], + 'F_d': a.qoi['F_d'], + 'F_a': a.qoi['F_a'], + 'relaxation': a.proc_data_dict['relaxation_events'], + 'excitation': a.proc_data_dict['residual_excitation']} + + def measure_ssro_vs_TWPA_frequency_power( + self, pump_source, freqs, powers, + nr_shots=4092*4, nested_MC=None, analyze=True): + """ + Measures the SNR and readout fidelities as a function of the TWPA + pump frequency and power. 
+ + Args: + pump_source (RohdeSchwarz_SGS100A): + object controlling the MW source serving as TWPA pump + + freqs (array): + TWPA pump frequencies to sweep over + + powers (array): + list of TWPA pump powers to sweep over + + nr_shots (int): + number of single-shot measurements used to estimate SNR + and redout fidelities + """ + log.warning('FIXME: Does not make use of the SSRO detector') + + if nested_MC is None: + nested_MC = self.instr_nested_MC.get_instr() + + self.prepare_for_timedomain() + RO_lutman = self.instr_LutMan_RO.get_instr() + old_ro_prepare_state = self.cfg_prepare_ro_awg() + self.ro_acq_digitized(False) + self.cfg_prepare_ro_awg(False) + + d = det.Function_Detector( + self.measure_ssro, + msmt_kw={ + 'nr_shots': nr_shots, + 'analyze': True, 'SNR_detector': True, + 'cal_residual_excitation': True, + 'prepare': False, + 'disable_metadata': True + }, + result_keys=['SNR', 'F_d', 'F_a'] + ) + nested_MC.set_sweep_function(pump_source.frequency) + nested_MC.set_sweep_points(freqs) + nested_MC.set_detector_function(d) + nested_MC.set_sweep_function_2D(pump_source.power) + nested_MC.set_sweep_points_2D(powers) + label = 'SSRO_freq_amp_sweep' + self.msmt_suffix + nested_MC.run(label, mode='2D') + + self.cfg_prepare_ro_awg(old_ro_prepare_state) + + if analyze: + ma.TwoD_Analysis(label=label, plot_all=True, auto=True) + + def measure_ssro_vs_pulse_length(self, lengths=np.arange(100e-9, 1501e-9, 100e-9), + nr_shots=4092*4, nested_MC=None, analyze=True, + label_suffix: str = ''): + """ + Measures the SNR and readout fidelities as a function of the duration + of the readout pulse. For each pulse duration transients are + measured and optimal weights calculated. 
+ + Args: + lengths (array): + durations of the readout pulse for which SNR is measured + + nr_shots (int): + number of single-shot measurements used to estimate SNR + and redout fidelities + """ + log.warning('FIXME: Does not make use of the SSRO detector') + + if nested_MC is None: + nested_MC = self.instr_nested_MC.get_instr() + self.ro_acq_digitized(False) + self.prepare_for_timedomain() + RO_lutman = self.instr_LutMan_RO.get_instr() + + sweep_function = swf.lutman_par_UHFQC_dig_trig( + LutMan=RO_lutman, + LutMan_parameter=RO_lutman['M_length_R{}'.format( + self.cfg_qubit_nr())] + ) + + d = det.Function_Detector( + self.calibrate_optimal_weights, + msmt_kw={ + 'analyze': True, + }, + result_keys=['SNR', 'F_d', 'F_a', 'relaxation', 'excitation'] + ) + # nested_MC.set_sweep_function(sweep_function) + nested_MC.set_sweep_function(self.ro_pulse_length) + nested_MC.set_sweep_points(lengths) + nested_MC.set_detector_function(d) + label = 'SSRO_length_sweep' + self.msmt_suffix + label_suffix + nested_MC.run(label) + + if analyze: + ma.MeasurementAnalysis(label=label, plot_all=False, auto=True) + + def measure_transients( + self, + MC=None, + analyze: bool = True, + cases: tuple = ('off', 'on'), + prepare: bool = True, + depletion_analysis: bool = True, + depletion_analysis_plot: bool = True, + depletion_optimization_window: bool = None, + disable_metadata: bool = False, + plot_max_time: bool = None + ): + # docstring from parent class + if MC is None: + MC = self.instr_MC.get_instr() + if plot_max_time is None: + plot_max_time = self.ro_acq_integration_length()+250e-9 + + if prepare: + self.prepare_for_timedomain() + + transients = [] + for i, pulse_comb in enumerate(cases): + if pulse_comb.lower() in ['off', 'on', 'two']: + p = sqo.off_on( + qubit_idx=self.cfg_qubit_nr(), + pulse_comb=pulse_comb, + initialize=False, + platf_cfg=self.cfg_openql_platform_fn()) + self.instr_CC.get_instr().eqasm_program(p.filename) + else: + raise ValueError(f"pulse_comb {pulse_comb} not 
understood: Only 'on' and 'off' allowed.") + + s = swf.OpenQL_Sweep( + openql_program=p, + CCL=self.instr_CC.get_instr(), + parameter_name='Transient time', + unit='s', + upload=prepare) + MC.set_sweep_function(s) + + if 'UHFQC' in self.instr_acquisition(): + sampling_rate = 1.8e9 + else: + raise NotImplementedError() + MC.set_sweep_points( + np.arange(self.input_average_detector.nr_samples) / sampling_rate) + MC.set_detector_function(self.input_average_detector) + data = MC.run( + 'Measure_transients{}_{}'.format(self.msmt_suffix, i), + disable_snapshot_metadata=disable_metadata) + dset = data['dset'] + transients.append(dset.T[1:]) + if analyze: + ma.MeasurementAnalysis() + if depletion_analysis: + a = ma.Input_average_analysis( + IF=self.ro_freq_mod(), + optimization_window=depletion_optimization_window, + plot=depletion_analysis_plot, + plot_max_time=plot_max_time) + return a + else: + return [np.array(t, dtype=np.float64) for t in transients] + + def measure_RO_QND( + self, + prepare_for_timedomain: bool = True, + calibrate_optimal_weights: bool = False, + nr_max_acq: int = 2**17, + disable_metadata: bool = False, + no_figs: bool = False, + f_state: bool = False + ): + # ensure readout settings are correct + assert self.ro_acq_weight_type() != 'optimal' + assert self.ro_acq_digitized() == False + + if calibrate_optimal_weights: + self.calibrate_optimal_weights( + prepare=prepare_for_timedomain, + verify=False, + optimal_IQ=True, + disable_metadata=True) + + if prepare_for_timedomain: + if f_state: + mw_lm = self.instr_LutMan_MW.get_instr() + LM = mw_lm.LutMap() + LM[9] = {'name': 'rX12', 'theta': 180, 'phi': 0, 'type': 'ef'} + mw_lm.LutMap(LM) + self.prepare_for_timedomain() + + d = self.int_log_det + # the QND sequence has 5 measurements, + # therefore we need to make sure the number of shots is a multiple of that + uhfqc_max_avg = min(max(2**10, nr_max_acq), 2**17) + nr_measurements: int = 5 + nr_shots: int = int(uhfqc_max_avg/nr_measurements) * 
nr_measurements + d.nr_shots = nr_shots + p = sqo.RO_QND_sequence( + q_idx=self.cfg_qubit_nr(), + platf_cfg=self.cfg_openql_platform_fn(), + f_state=f_state + ) + s = swf.OpenQL_Sweep( + openql_program=p, + CCL=self.instr_CC.get_instr() + ) + MC = self.instr_MC.get_instr() + MC.soft_avg(1) + MC.live_plot_enabled(False) + MC.set_sweep_function(s) + MC.set_sweep_points(np.arange(nr_shots)) + MC.set_detector_function(d) + MC.run( + f"RO_QND_measurement_{self.name}", + disable_snapshot_metadata=disable_metadata + ) + a = ma2.ra.measurement_QND_analysis( + qubit=self.name, + f_state=f_state, + label='QND', + extract_only=no_figs) + return a.qoi + + def calibrate_RO_QND( + self, + frequencies: list, + amplitudes: list, + calibrate_optimal_weights: bool=False, + # use_rx12:bool=False, + update_ro_params: bool = True): + ''' + Sweeps readout frequency and amplitude while measuring + RO fidelity and QNDness. Updates readout frequency and + amplitude to optimal values in the sweep based on P_QND. 
+ ''' + # ensure readout settings are correct + assert self.ro_acq_weight_type() != 'optimal' + assert self.ro_acq_digitized() == False + # prepare for timedomain + self.prepare_for_timedomain() + # Set sweep function and detector function + ro_lm = self.instr_LutMan_RO.get_instr() + q_idx = self.cfg_qubit_nr() + swf1 = swf.RO_freq_sweep(qubit=self) + swf2 = swf.lutman_par_UHFQC_dig_trig( + LutMan=ro_lm, + LutMan_parameter=ro_lm.parameters[f'M_amp_R{q_idx}']) + d = det.Function_Detector( + self.measure_RO_QND, + msmt_kw={'prepare_for_timedomain' : False, + 'disable_metadata' : True, + 'calibrate_optimal_weights':calibrate_optimal_weights, + 'nr_max_acq' : 2**15, + # 'use_rx12':use_rx12, + 'no_figs' : False}, + result_keys=['Fidelity', 'P_QND', 'P_QNDp'], + value_names=['Fidelity', 'P_QND', 'P_QNDp'], + value_units=['fraction', 'fraction', 'fraction']) + nested_MC = self.instr_nested_MC.get_instr() + nested_MC.set_sweep_function(swf1) + nested_MC.set_sweep_function_2D(swf2) + nested_MC.set_sweep_points(frequencies) + nested_MC.set_sweep_points_2D(amplitudes) + nested_MC.set_detector_function(d) + nested_MC.run(f'RO_QND_sweep_{self.name}', mode='2D') + + a = ma2.mra.Readout_sweep_analysis( + label=f'RO_QND_sweep_{self.name}', + qubit=self.name, + frequencies=frequencies, + amplitudes=amplitudes) + + if update_ro_params: + opt_freq, opt_amp = a.qoi['Opt_Cal'] + self.ro_freq(opt_freq) + self.ro_pulse_amp(opt_amp) + + def measure_msmt_butterfly( + self, + prepare_for_timedomain: bool = True, + calibrate_optimal_weights: bool = False, + nr_max_acq: int = 2**17, + disable_metadata: bool = False, + f_state: bool = False, + no_figs: bool = False): + + # ensure readout settings are correct + assert self.ro_acq_weight_type() != 'optimal' + assert self.ro_acq_digitized() == False + + if calibrate_optimal_weights: + self.calibrate_optimal_weights( + prepare=prepare_for_timedomain, + verify=False, + optimal_IQ=True, + disable_metadata=True) + + if prepare_for_timedomain: + 
self.prepare_for_timedomain() + + d = self.int_log_det + # the msmt butterfly sequence has 3 measurements per state, + # therefore we need to make sure the number of shots is a multiple of that + uhfqc_max_avg = min(max(2**10, nr_max_acq), 2**20) + + if f_state: + nr_measurements = 12 + else: + nr_measurements = 8 + + nr_shots = int((uhfqc_max_avg//nr_measurements) * nr_measurements) + d.nr_shots = nr_shots + p = sqo.butterfly( + f_state = f_state, + qubit_idx=self.cfg_qubit_nr(), + platf_cfg=self.cfg_openql_platform_fn(), + ) + s = swf.OpenQL_Sweep( + openql_program=p, + CCL=self.instr_CC.get_instr() + ) + MC = self.instr_MC.get_instr() + MC.soft_avg(1) + MC.live_plot_enabled(False) + MC.set_sweep_function(s) + MC.set_sweep_points(np.arange(nr_shots)) + MC.set_detector_function(d) + MC.run( + f"Measurement_butterfly_{self.name}_{f_state}", + disable_snapshot_metadata=disable_metadata + ) + a = ma2.ra.measurement_butterfly_analysis( + qubit=self.name, + label='butterfly', + f_state=f_state, + extract_only=no_figs) + return a.qoi + + def measure_transients_CCL_switched(self, MC=None, analyze: bool = True, + cases=('off', 'on'), + prepare: bool = True, depletion_analysis: bool = True, + depletion_analysis_plot: bool = True, + depletion_optimization_window=None): + # docstring from parent class + if MC is None: + MC = self.instr_MC.get_instr() + + self.prepare_for_timedomain() + # off/on switching is achieved by turning the MW source on and + # off as this is much faster than recompiling/uploading + + transients = [] + for i, pulse_comb in enumerate(cases): + p = sqo.off_on( + qubit_idx=self.cfg_qubit_nr(), pulse_comb=pulse_comb, + initialize=False, + platf_cfg=self.cfg_openql_platform_fn()) + self.instr_CC.get_instr().eqasm_program(p.filename) + + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr(), + parameter_name='Transient time', unit='s', + upload=prepare) + MC.set_sweep_function(s) + + if 'UHFQC' in self.instr_acquisition(): + sampling_rate = 
1.8e9 + else: + raise NotImplementedError() + MC.set_sweep_points( + np.arange(self.input_average_detector.nr_samples) / + sampling_rate) + MC.set_detector_function(self.input_average_detector) + data = MC.run( + 'Measure_transients{}_{}'.format(self.msmt_suffix, i)) + dset = data['dset'] + transients.append(dset.T[1:]) + if analyze: + ma.MeasurementAnalysis() + if depletion_analysis: + a = ma.Input_average_analysis( + IF=self.ro_freq_mod(), + optimization_window=depletion_optimization_window, + plot=depletion_analysis_plot) + return a + else: + return [np.array(t, dtype=np.float64) for t in transients] + + def measure_dispersive_shift_pulsed( + self, + freqs=None, + cases=['off', 'on'], + analyze: bool = True, + prepare: bool = True): + """ + Measures the RO resonator spectroscopy with the qubit in ground and excited state. + Specifically, performs two experiments. Applies sequence: + - initialize qubit in ground state ( wait) + - (only in the second experiment) apply a (previously tuned up) pi pulse + - apply readout pulse and measure + This sequence is repeated while sweeping ro_freq. 
+ + Args: + freqs (array): + sweeped range of ro_freq + """ + + # docstring from parent class + MC = self.instr_MC.get_instr() + + if freqs is None: + if self.freq_res() is None: + raise ValueError( + "Qubit has no resonator frequency.\ + \nUpdate freq_res parameter.") + else: + freqs = self.freq_res()+np.arange(-10e6, 5e6, .1e6) + + if 'optimal' in self.ro_acq_weight_type(): + raise ImplementationError( + "Change readout demodulation to SSB.") + + self.prepare_for_timedomain() + + # off/on switching is achieved by turning the MW source on and + # off as this is much faster than recompiling/uploading + f_res = [] + for i, pulse_comb in enumerate(cases): + p = sqo.off_on( + qubit_idx=self.cfg_qubit_nr(), pulse_comb=pulse_comb, + initialize=False, + platf_cfg=self.cfg_openql_platform_fn()) + self.instr_CC.get_instr().eqasm_program(p.filename) + # CCL gets started in the int_avg detector + + MC.set_sweep_function(swf.Heterodyne_Frequency_Sweep_simple( + MW_LO_source=self.instr_LO_ro.get_instr(), + IF=self.ro_freq_mod())) + MC.set_sweep_points(freqs) + + self.int_avg_det_single._set_real_imag(False) + MC.set_detector_function(self.int_avg_det_single) + MC.run(name='Resonator_scan_'+pulse_comb+self.msmt_suffix) + + if analyze: + a = ma2.ra.Dispersive_shift_Analysis(self.name) + return True + + def measure_rabi(self, MC=None, amps=np.linspace(0, 1, 31), + analyze=True, close_fig=True, real_imag=True, + prepare_for_timedomain=True, all_modules=False): + """ + Perform a Rabi experiment in which amplitude of the MW pulse is sweeped + while the drive frequency and pulse duration is kept fixed + + Args: + amps (array): + range of amplitudes to sweep. If cfg_with_vsm()==True pulse amplitude + is adjusted by sweeping the attenuation of the relevant gaussian VSM channel, + in max range (0.1 to 1.0). + If cfg_with_vsm()==False adjusts the channel amplitude of the AWG in range (0 to 1). 
+ + Relevant parameters: + mw_amp180 (float): + amplitude of the waveform corresponding to pi pulse (from 0 to 1) + + mw_channel_amp (float): + AWG channel amplitude (digitally scaling the waveform; form 0 to 1) + """ + + if self.cfg_with_vsm(): + self.measure_rabi_vsm(MC, amps, + analyze, close_fig, real_imag, + prepare_for_timedomain, all_modules) + else: + self.measure_rabi_channel_amp(MC, amps, + analyze, close_fig, real_imag, + prepare_for_timedomain) + + def measure_rabi_vsm(self, MC=None, amps=np.linspace(0.1, 1.0, 31), + analyze=True, close_fig=True, real_imag=True, + prepare_for_timedomain=True, all_modules=False): + """ + Perform a Rabi experiment in which amplitude of the MW pulse is sweeped + while the drive frequency and pulse duration is kept fixed + + Args: + amps (array): + range of amplitudes to sweep. Pulse amplitude is adjusted by sweeping + the attenuation of the relevant gaussian VSM channel, + in max range (0.1 to 1.0). + """ + if MC is None: + MC = self.instr_MC.get_instr() + if prepare_for_timedomain: + self.prepare_for_timedomain() + p = sqo.off_on( + qubit_idx=self.cfg_qubit_nr(), pulse_comb='on', + initialize=False, + platf_cfg=self.cfg_openql_platform_fn()) + + VSM = self.instr_VSM.get_instr() + + mod_out = self.mw_vsm_mod_out() + ch_in = self.mw_vsm_ch_in() + if all_modules: + mod_sweep = [] + for i in range(8): + VSM.set('mod{}_ch{}_marker_state'.format(i+1, ch_in), 'on') + G_par = VSM.parameters['mod{}_ch{}_gaussian_amp'.format( + i+1, ch_in)] + D_par = VSM.parameters['mod{}_ch{}_derivative_amp'.format( + i+1, ch_in)] + mod_sweep.append(swf.two_par_joint_sweep( + G_par, D_par, preserve_ratio=False)) + s = swf.multi_sweep_function(sweep_functions=mod_sweep, + retrieve_value=True) + else: + G_par = VSM.parameters['mod{}_ch{}_gaussian_amp'.format( + mod_out, ch_in)] + D_par = VSM.parameters['mod{}_ch{}_derivative_amp'.format( + mod_out, ch_in)] + + s = swf.two_par_joint_sweep(G_par, D_par, preserve_ratio=False, + retrieve_value=True, 
instr=VSM) + + self.instr_CC.get_instr().eqasm_program(p.filename) + MC.set_sweep_function(s) + MC.set_sweep_points(amps) + # real_imag is acutally not polar and as such works for opt weights + self.int_avg_det_single._set_real_imag(real_imag) + MC.set_detector_function(self.int_avg_det_single) + MC.run(name='rabi_'+self.msmt_suffix) + ma.Rabi_Analysis(label='rabi_') + return True + + def measure_rabi_channel_amp(self, MC=None, amps=np.linspace(0, 1, 31), + analyze=True, close_fig=True, real_imag=True, + prepare_for_timedomain=True, + disable_metadata: bool = False): + """ + Perform a Rabi experiment in which amplitude of the MW pulse is sweeped + while the drive frequency and pulse duration is kept fixed + + Args: + amps (array): + range of amplitudes to sweep. Amplitude is adjusted via the channel + amplitude of the AWG, in max range (0 to 1). + """ + + + MW_LutMan = self.instr_LutMan_MW.get_instr() + + if MC is None: + MC = self.instr_MC.get_instr() + if prepare_for_timedomain: + self.prepare_for_timedomain() + + p = sqo.off_on( + qubit_idx=self.cfg_qubit_nr(), pulse_comb='on', + initialize=False, + platf_cfg=self.cfg_openql_platform_fn()) + self.instr_CC.get_instr().eqasm_program(p.filename) + + s = MW_LutMan.channel_amp + print(s) + MC.set_sweep_function(s) + MC.set_sweep_points(amps) + # real_imag is acutally not polar and as such works for opt weights + self.int_avg_det_single._set_real_imag(real_imag) + MC.set_detector_function(self.int_avg_det_single) + + label = '' + MC.run(name=f'rabi_'+self.msmt_suffix+label, disable_snapshot_metadata=disable_metadata) + if analyze and not disable_metadata: + ma.Rabi_Analysis(label='rabi_') + return True + + def measure_rabi_mw_crosstalk(self, MC=None, amps=np.linspace(0, 1, 31), + cross_driving_qubit=None, + analyze=True, close_fig=True, real_imag=True, + disable_metadata = False, + prepare_for_timedomain=True): + """ + Perform a Rabi experiment in which amplitude of the MW pulse is sweeped + while the drive frequency 
and pulse duration is kept fixed + + Args: + amps (array): + range of amplitudes to sweep. Amplitude is adjusted via the channel + amplitude of the AWG, in max range (0 to 1). + """ + + if cross_driving_qubit is not None: + MW_LutMan = self.find_instrument(cross_driving_qubit).instr_LutMan_MW.get_instr() + qubi_cd_idx = self.find_instrument(cross_driving_qubit).cfg_qubit_nr() + self.find_instrument(cross_driving_qubit)._prep_td_sources() + self.find_instrument(cross_driving_qubit)._prep_mw_pulses() + + else: + MW_LutMan = self.instr_LutMan_MW.get_instr() + + if MC is None: + MC = self.instr_MC.get_instr() + if prepare_for_timedomain: + self.prepare_for_timedomain() + + p = sqo.off_on_mw_crosstalk( + qubit_idx=self.cfg_qubit_nr(), pulse_comb='on', + initialize=False, + cross_driving_qubit=qubi_cd_idx if cross_driving_qubit else None, + platf_cfg=self.cfg_openql_platform_fn()) + self.instr_CC.get_instr().eqasm_program(p.filename) + + s = MW_LutMan.channel_amp + print(s) + MC.set_sweep_function(s) + MC.set_sweep_points(amps) + # real_imag is acutally not polar and as such works for opt weights + self.int_avg_det_single._set_real_imag(real_imag) + MC.set_detector_function(self.int_avg_det_single) + + label = f'_drive_{cross_driving_qubit}' if cross_driving_qubit else '' + MC.run(name=f'rabi'+self.msmt_suffix+label, + disable_snapshot_metadata=disable_metadata) + a = None + try: + a = ma.Rabi_Analysis(label='rabi_') + except Exception as e: + warnings.warn("Failed to fit Rabi for the cross-driving case.") + + if a: + return a + + def measure_mw_crosstalk(self, MC=None, amps=np.linspace(0, 1, 121), + cross_driving_qb=None,disable_metadata = False, + analyze=True, close_fig=True, real_imag=True, + prepare_for_timedomain=True): + """ + Measure MW crosstalk matrix by measuring two Rabi experiments: + 1. a0 : standand rabi (drive the qubit qj through its dedicated drive line Dj) + 2. 
a1 : cross-drive rabi (drive the qubit qj through another drive line (Di) + at the freq of the qj) + Args: + amps (array): + range of amplitudes to sweep. If cfg_with_vsm()==True pulse amplitude + is adjusted by sweeping the attenuation of the relevant gaussian VSM channel, + in max range (0.1 to 1.0). + If cfg_with_vsm()==False adjusts the channel amplitude of the AWG in range (0 to 1). + + cross_driving_qubit is qubit qi with its drive line Di. + Relevant parameters: + mw_amp180 (float): + amplitude of the waveform corresponding to pi pulse (from 0 to 1) + + mw_channel_amp (float): + AWG channel amplitude (digitally scaling the waveform; form 0 to 1) + """ + + try: + freq_qj = self.freq_qubit() # set qi to this qubit freq of qubit j + cross_driving_qubit = None + amps=np.linspace(0, 0.1, 51) + a0 = self.measure_rabi_mw_crosstalk(MC, amps,cross_driving_qubit, + analyze, close_fig, real_imag,disable_metadata, + prepare_for_timedomain) + + cross_driving_qubit = cross_driving_qb + qi = self.find_instrument(cross_driving_qubit) + freq_qi = qi.freq_qubit() + qi.freq_qubit(freq_qj) + amps=np.linspace(0, 1, 121) + prepare_for_timedomain = False + a1 = self.measure_rabi_mw_crosstalk(MC, amps,cross_driving_qubit, + analyze, close_fig, real_imag,disable_metadata, + prepare_for_timedomain) + ## set back the right parameters. + qi.freq_qubit(freq_qi) + except: + print_exception() + qi.freq_qubit(freq_qi) + raise Exception('Experiment failed') + + try: + pi_ajj = abs(a0.fit_result.params['period'].value) / 2 + pi_aji = abs(a1.fit_result.params['period'].value) / 2 + + mw_isolation = 20*np.log10(pi_aji/pi_ajj) + + return mw_isolation + except: + mw_isolation = 80 + + def measure_allxy( + self, + MC=None, + label: str = '', + analyze=True, + close_fig=True, + prepare_for_timedomain=True, + prepend_msmt: bool = False, + wait_time_after_prepend_msmt: int = 0, + disable_metadata=False + ): + # docstring from parent class + # N.B. 
this is a good example for a generic timedomain experiment using + # the CCL transmon. + if MC is None: + MC = self.instr_MC.get_instr() + if prepare_for_timedomain: + self.prepare_for_timedomain() + p = sqo.AllXY( + qubit_idx=self.cfg_qubit_nr(), + platf_cfg=self.cfg_openql_platform_fn(), + double_points=True, + prepend_msmt=prepend_msmt, + wait_time_after_prepend_msmt=wait_time_after_prepend_msmt + ) + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr()) + d = self.int_avg_det + MC.set_sweep_function(s) + MC.set_sweep_points(np.arange(42) if not prepend_msmt else np.arange(2*42)) + MC.set_detector_function(d) + MC.run('AllXY'+self.msmt_suffix+label, + disable_snapshot_metadata=disable_metadata) + if analyze: + a = ma.AllXY_Analysis(close_main_fig=close_fig, prepend_msmt=prepend_msmt) + return a.deviation_total + + def allxy_GBT(self, MC=None, + label: str = '', + analyze=True, close_fig=True, + prepare_for_timedomain=True,termination_opt=0.02): + '''# + This function is the same as measure AllXY, but with a termination limit + This termination limit is as a system metric to evalulate the calibration + by GBT if good or not. 
+ ''' + old_avg = self.ro_soft_avg() + self.ro_soft_avg(4) + if MC is None: + MC = self.instr_MC.get_instr() + if prepare_for_timedomain: + self.prepare_for_timedomain() + p = sqo.AllXY(qubit_idx=self.cfg_qubit_nr(), double_points=True, + platf_cfg=self.cfg_openql_platform_fn()) + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr()) + d = self.int_avg_det + MC.set_sweep_function(s) + MC.set_sweep_points(np.arange(42)) + MC.set_detector_function(d) + MC.run('AllXY'+label+self.msmt_suffix) + self.ro_soft_avg(old_avg) + a = ma.AllXY_Analysis(close_main_fig=close_fig) + if a.deviation_total > termination_opt: + return False + else: + return True + + def measure_depletion_allxy(self, MC=None, + analyze=True, close_fig=True, + prepare_for_timedomain=True, + label='', + disable_metadata=False): + if MC is None: + MC = self.instr_MC.get_instr() + if prepare_for_timedomain: + self.prepare_for_timedomain() + p = sqo.depletion_AllXY(qubit_idx=self.cfg_qubit_nr(), + platf_cfg=self.cfg_openql_platform_fn()) + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr()) + d = self.int_avg_det + MC.set_sweep_function(s) + MC.set_sweep_points(np.arange(21*2*6)) + MC.set_detector_function(d) + MC.run('Depletion_AllXY'+self.msmt_suffix+label, + disable_snapshot_metadata=disable_metadata) + ma2.ra.Depletion_AllXY_analysis(self.name, label='Depletion') + + def calibrate_mw_gates_restless( + self, MC=None, + parameter_list: list = ['G_amp', 'D_amp', 'freq'], + initial_values: list = None, + initial_steps: list = [0.05, 0.05, 1e6], + nr_cliffords: int = 80, nr_seeds: int = 200, + verbose: bool = True, update: bool = True, + prepare_for_timedomain: bool = True): + """ + Refs: + Rol PR Applied 7, 041001 (2017) + """ + + return self.calibrate_mw_gates_rb( + MC=None, + parameter_list=parameter_list, + initial_values=initial_values, + initial_steps=initial_steps, + nr_cliffords=nr_cliffords, nr_seeds=nr_seeds, + verbose=verbose, update=update, + 
prepare_for_timedomain=prepare_for_timedomain, + method='restless') + + def calibrate_mw_gates_rb( + self, MC=None, + parameter_list: list = ['G_amp', 'D_amp', 'freq'], + initial_values: list = None, + initial_steps: list = [0.05, 0.05, 1e6], + nr_cliffords: int = 80, nr_seeds: int = 200, + verbose: bool = True, update: bool = True, + prepare_for_timedomain: bool = True, + method: bool = None, + optimizer: str = 'NM'): + """ + Calibrates microwave pulses using a randomized benchmarking based + cost-function. + requirements for restless: + - Digitized readout (calibrated) + requirements for ORBIT: + - Optimal weights such that minimizing correspond to 0 state. + """ + if method is None: + method = self.cfg_rb_calibrate_method() + if method == 'restless': + restless = True + else: # ORBIT + restless = False + + if MC is None: + MC = self.instr_MC.get_instr() + + if initial_steps is None: + initial_steps: list = [0.05, 0.05, 1e6] + + if prepare_for_timedomain: + self.prepare_for_timedomain() + + if parameter_list is None: + # parameter_list = ['G_amp', 'D_amp'] + parameter_list = ['G_amp', 'D_amp','freq'] + + mw_lutman = self.instr_LutMan_MW.get_instr() + + G_amp_par = wrap_par_to_swf( + mw_lutman.parameters['channel_amp'], + retrieve_value=True) + D_amp_par = swf.QWG_lutman_par(LutMan=mw_lutman, + LutMan_parameter=mw_lutman.mw_motzoi) + + freq_par = self.instr_LO_mw.get_instr().frequency + + sweep_pars = [] + for par in parameter_list: + if par == 'G_amp': + sweep_pars.append(G_amp_par) + elif par == 'D_amp': + sweep_pars.append(D_amp_par) + elif par == 'freq': + sweep_pars.append(freq_par) + else: + raise NotImplementedError( + "Parameter {} not recognized".format(par)) + + if initial_values is None: + # use the current values of the parameters being varied. 
+ initial_values = [G_amp_par.get(),mw_lutman.mw_motzoi.get(),freq_par.get()] + + # Preparing the sequence + if restless: + net_clifford = 3 # flipping sequence + d = det.UHFQC_single_qubit_statistics_logging_det( + self.instr_acquisition.get_instr(), + self.instr_CC.get_instr(), nr_shots=4*4095, + integration_length=self.ro_acq_integration_length(), + channel=self.ro_acq_weight_chI(), + statemap={'0': '1', '1': '0'}) + minimize = False + msmt_string = 'Restless_tuneup_{}Cl_{}seeds'.format( + nr_cliffords, nr_seeds) + self.msmt_suffix + + else: + net_clifford = 0 # not flipping sequence + d = self.int_avg_det_single + minimize = True + msmt_string = 'ORBIT_tuneup_{}Cl_{}seeds'.format( + nr_cliffords, nr_seeds) + self.msmt_suffix + + p = sqo.randomized_benchmarking( + self.cfg_qubit_nr(), self.cfg_openql_platform_fn(), + nr_cliffords=[nr_cliffords], + net_clifford=net_clifford, nr_seeds=nr_seeds, + restless=restless, cal_points=False) + self.instr_CC.get_instr().eqasm_program(p.filename) + self.instr_CC.get_instr().start() + + MC.set_sweep_functions(sweep_pars) + + MC.set_detector_function(d) + + if optimizer == 'CMA': + ad_func_pars = {'adaptive_function': cma.fmin, + 'x0': initial_values, + 'sigma0': 1, + # 'noise_handler': cma.NoiseHandler(len(initial_values)), + 'minimize': minimize, + 'options': {'cma_stds': initial_steps}} + + elif optimizer == 'NM': + ad_func_pars = {'adaptive_function': nelder_mead, + 'x0': initial_values, + 'initial_step': initial_steps, + 'no_improve_break': 50, + 'minimize': minimize, + 'maxiter': 1500, + 'bounds': []} + + MC.set_adaptive_function_parameters(ad_func_pars) + MC.run(name=msmt_string, + mode='adaptive') + a = ma.OptimizationAnalysis(label=msmt_string) + + if update: + if verbose: + print("Updating parameters in qubit object") + + opt_par_values = a.optimization_result[0] + for par in parameter_list: + if par == 'G_amp': + G_idx = parameter_list.index('G_amp') + self.mw_channel_amp(opt_par_values[G_idx]) + elif par == 
'D_amp': + D_idx = parameter_list.index('D_amp') + self.mw_vsm_D_amp(opt_par_values[D_idx]) + elif par == 'D_phase': + D_idx = parameter_list.index('D_phase') + self.mw_vsm_D_phase(opt_par_values[D_idx]) + elif par == 'freq': + freq_idx = parameter_list.index('freq') + # We are varying the LO frequency in the opt, not the q freq. + self.freq_qubit(opt_par_values[freq_idx] + + self.mw_freq_mod.get()) + + def calibrate_mw_gates_allxy(self, nested_MC=None, + start_values=None, + initial_steps=None, + parameter_list=None, + termination_opt=0.01): + # FIXME: this tuneup does not update the qubit object parameters + # update: Fixed on the the pagani set-up + + # FIXME2: this tuneup does not return True upon success + # update: Fixed on the pagani set-up + + if initial_steps is None: + if parameter_list is None: + initial_steps = [1e6, 0.05, 0.05] + else: + raise ValueError( + "must pass initial steps if setting parameter_list") + + if nested_MC is None: + nested_MC = self.instr_nested_MC.get_instr() + + if parameter_list is None: + if self.cfg_with_vsm(): + parameter_list = ["freq_qubit", + "mw_vsm_G_amp", + "mw_vsm_D_amp"] + else: + parameter_list = ["freq_qubit", + "mw_channel_amp", + "mw_motzoi"] + + nested_MC.set_sweep_functions([ + self.__getattr__(p) for p in parameter_list]) + + if start_values is None: + # use current values + start_values = [self.get(p) for p in parameter_list] + + d = det.Function_Detector(self.measure_allxy, + value_names=['AllXY cost'], + value_units=['a.u.'],) + nested_MC.set_detector_function(d) + + ad_func_pars = {'adaptive_function': nelder_mead, + 'x0': start_values, + 'initial_step': initial_steps, + 'no_improve_break': 10, + 'minimize': True, + 'maxiter': 500, + 'f_termination': termination_opt} + + nested_MC.set_adaptive_function_parameters(ad_func_pars) + nested_MC.set_optimization_method('nelder_mead') + nested_MC.run(name='gate_tuneup_allxy', mode='adaptive') + a2 = ma.OptimizationAnalysis(label='gate_tuneup_allxy') + + if 
a2.optimization_result[1][0] > termination_opt: + return False + else: + return True + + def calibrate_mw_gates_allxy2(self, nested_MC=None, + start_values=None, + initial_steps=None, f_termination=0.01): + ''' + FIXME! Merge both calibrate allxy methods. + Optimizes ALLXY sequency by tunning 2 parameters: + mw_channel_amp and mw_motzoi. + + Used for Graph based tune-up in the ALLXY node. + ''' + old_avg = self.ro_acq_averages() + self.ro_acq_averages(2**14) + + VSM = self.instr_VSM.get_instr() + # Close all vsm channels + modules = range(8) + for module in modules: + VSM.set('mod{}_marker_source'.format(module+1), 'int') + for channel in [1, 2, 3, 4]: + VSM.set('mod{}_ch{}_marker_state'.format( + module+1, channel), 'off') + # Open intended channel + VSM.set('mod{}_marker_source'.format(self.mw_vsm_mod_out()), 'int') + VSM.set('mod{}_ch{}_marker_state'.format( + self.mw_vsm_mod_out(), self.mw_vsm_ch_in()), 'on') + + if initial_steps is None: + initial_steps = [0.05, 0.05] + + if nested_MC is None: + nested_MC = self.instr_nested_MC.get_instr() + + if self.cfg_with_vsm(): + parameter_list = ["mw_vsm_G_amp", + "mw_vsm_D_amp"] + else: + parameter_list = ["mw_channel_amp", + "mw_motzoi"] + + nested_MC.set_sweep_functions([ + self.__getattr__(p) for p in parameter_list]) + + if start_values is None: + # use current values + start_values = [self.get(p) for p in parameter_list] + + d = det.Function_Detector(self.measure_allxy, + value_names=['AllXY cost'], + value_units=['a.u.'],) + nested_MC.set_detector_function(d) + + ad_func_pars = {'adaptive_function': nelder_mead, + 'x0': start_values, + 'initial_step': initial_steps, + 'no_improve_break': 10, + 'minimize': True, + 'maxiter': 500, + 'f_termination': f_termination} + + nested_MC.set_adaptive_function_parameters(ad_func_pars) + nested_MC.set_optimization_method('nelder_mead') + nested_MC.run(name='gate_tuneup_allxy', mode='adaptive') + a2 = ma.OptimizationAnalysis(label='gate_tuneup_allxy') + 
self.ro_acq_averages(old_avg) + # Open all vsm channels + for module in modules: + VSM.set('mod{}_marker_source'.format(module+1), 'int') + for channel in [1, 2, 3, 4]: + VSM.set('mod{}_ch{}_marker_state'.format( + module+1, channel), 'on') + + if a2.optimization_result[1][0] > f_termination: + return False + else: + return True + + def calibrate_RO(self, nested_MC=None, + start_params=None, + initial_step=None, + threshold=0.05): + ''' + Optimizes the RO assignment fidelity using 2 parameters: + ro_freq and ro_pulse_amp. + + Args: + start_params: Starting parameters for .ro_freq and + .ro_pulse_amp. These have to be passed on in + the aforementioned order, that is: + [ro_freq, ro_pulse_amp]. + + initial_steps: These have to be given in the order: + [ro_freq, ro_pulse_amp] + + threshold: Assignment fidelity error (1-F_a) threshold used in + the optimization. + + Used for Graph based tune-up. + ''' + + # FIXME: Crashes whenever it tries to set the pulse amplitude higher + # than 1. + + if nested_MC is None: + nested_MC = self.instr_nested_MC.get_instr() + + if start_params is None: + start_params = [self.ro_freq(), self.ro_pulse_amp()] + + if initial_step is None: + initial_step = [1.e6, .05] + + nested_MC.set_sweep_functions([self.ro_freq, self.ro_pulse_amp]) + + def wrap_func(): + error = 1 - self.calibrate_optimal_weights()['F_a'] + return error + d = det.Function_Detector(wrap_func, + value_names=['F_a error'], + value_units=['a.u.']) + nested_MC.set_detector_function(d) + + ad_func_pars = {'adaptive_function': nelder_mead, + 'x0': start_params, + 'initial_step': initial_step, + 'no_improve_break': 10, + 'minimize': True, + 'maxiter': 20, + 'f_termination': threshold} + nested_MC.set_adaptive_function_parameters(ad_func_pars) + + nested_MC.set_optimization_method('nelder_mead') + nested_MC.run(name='RO_tuneup', mode='adaptive') + + a = ma.OptimizationAnalysis(label='RO_tuneup') + + if a.optimization_result[1][0] > 0.05: # Fidelity 0.95 + return False + else: + 
return True + + def calibrate_depletion_pulse( + self, + nested_MC=None, + two_par=True, + amp0=None, + amp1=None, + phi0=180, + phi1=0, + initial_steps=None, + max_iterations=100, + depletion_optimization_window=None, + depletion_analysis_plot=False, + use_RTE_cost_function=False, + use_adaptive_optimizer=False, + adaptive_loss_weight=5, + target_cost=0.02 + ): + """ + this function automatically tunes up a two step, four-parameter + depletion pulse. + It uses the averaged transients for ground and excited state for its + cost function. + + Refs: + Bultnik PR Applied 6, 034008 (2016) + + Args: + two_par: if readout is performed at the symmetry point and in the + linear regime two parameters will suffice. Othen, four + paramters do not converge. + First optimizaing the amplitudes (two paramters) and + then run the full 4 paramaters with the correct initial + amplitudes works. + optimization_window: optimization window determins which part of + the transients will be + nulled in the optimization. By default it uses a + window of 500 ns post depletiona with a 50 ns buffer. 
+ initial_steps: These have to be given in the order + [phi0,phi1,amp0,amp1] for 4-par tuning and + [amp0,amp1] for 2-par tunining + """ + + # FIXME: this calibration does not update the qubit object params + # FIXME2: this calibration does not return a boolean upon success + + # tuneup requires nested MC as the transients detector will use MC + self.ro_pulse_type('up_down_down') + if nested_MC is None: + nested_MC = self.instr_nested_MC.get_instr() + + # setting the initial depletion amplitudes + if amp0 is None: + amp0 = 2*self.ro_pulse_amp() + if amp1 is None: + amp1 = 0.5*self.ro_pulse_amp() + + if depletion_optimization_window is None: + depletion_optimization_window = [ + self.ro_pulse_length()+self.ro_pulse_down_length0() + + self.ro_pulse_down_length1()+50e-9, + self.ro_pulse_length()+self.ro_pulse_down_length0() + + self.ro_pulse_down_length1()+550e-9] + + if two_par: + nested_MC.set_sweep_functions([ + self.ro_pulse_down_amp0, + self.ro_pulse_down_amp1]) + else: + nested_MC.set_sweep_functions([self.ro_pulse_down_phi0, + self.ro_pulse_down_phi1, + self.ro_pulse_down_amp0, + self.ro_pulse_down_amp1]) + + # prepare here once, instead of every time in the detector function + self.prepare_for_timedomain() + + if use_RTE_cost_function: + d = det.Function_Detector( + self.measure_error_fraction, + msmt_kw={'net_gate': 'pi', + 'feedback': False, + 'sequence_type': 'echo'}, + value_names=['error fraction'], + value_units=['au'], + result_keys=['error fraction']) + else: + # preparation needs to be done in detector function + # as we are only sweeping parameters here! 
+ d = det.Function_Detector( + self.measure_transients, + msmt_kw={'depletion_analysis': True, + 'depletion_analysis_plot': depletion_analysis_plot, + 'depletion_optimization_window': depletion_optimization_window, + 'prepare': True}, + value_names=['depletion cost'], + value_units=['au'], + result_keys=['depletion_cost']) + nested_MC.set_detector_function(d) + + if two_par: + if initial_steps is None: + initial_steps = [-0.5*amp0, -0.5*amp1] + if use_adaptive_optimizer: + goal = mk_min_threshold_goal_func( + max_pnts_beyond_threshold=2) + loss = mk_minimization_loss_func( + max_no_improve_in_local=8, + converge_below=target_cost, + volume_weight=adaptive_loss_weight) + amp0_bounds = np.array([0.1*amp0, 2*amp0]) + amp1_bounds = np.array([0.1*amp1, 2*amp1]) + ad_func_pars = {'adaptive_function': LearnerND_Minimizer, + 'goal': lambda l: goal(l) or l.npoints >= max_iterations, + 'bounds': [amp0_bounds, amp1_bounds], + 'loss_per_simplex': loss, + 'minimize': True, + 'X0': np.array([np.linspace(*amp0_bounds, 10), + np.linspace(*amp1_bounds, 10)]).T } + else: + ad_func_pars = {'adaptive_function': nelder_mead, + 'x0': [amp0, amp1], + 'initial_step': initial_steps, + 'no_improve_break': 8, + 'no_improve_thr': target_cost/10, + 'minimize': True, + 'maxiter': max_iterations} + self.ro_pulse_down_phi0(180) + self.ro_pulse_down_phi1(0) + else: + if initial_steps is None: + initial_steps = [10, 10, -0.1*amp0, -0.1*amp1] + if use_adaptive_optimizer: + goal = mk_min_threshold_goal_func( + max_pnts_beyond_threshold=2) + loss = mk_minimization_loss_func( + max_no_improve_in_local=8, + converge_below=target_cost, + volume_weight=adaptive_loss_weight) + ph0_bounds = np.array([150, 210]) + ph1_bounds = np.array([0, 30]) + amp0_bounds = np.array([0.1*amp0, 2*amp0]) + amp1_bounds = np.array([0.1*amp1, 2*amp1]) + ad_func_pars = {'adaptive_function': LearnerND_Minimizer, + 'goal': lambda l: goal(l) or l.npoints >= max_iterations, + 'bounds': [ph0_bounds, ph1_bounds, + amp0_bounds, 
amp1_bounds], + 'loss_per_simplex': loss, + 'minimize': True, + 'X0': np.array([np.linspace(*ph0_bounds, 10), + np.linspace(*ph1_bounds, 10), + np.linspace(*amp0_bounds, 10), + np.linspace(*amp1_bounds, 10)]).T } + else: + ad_func_pars = {'adaptive_function': nelder_mead, + 'x0': [phi0, phi1, amp0, amp1], + 'initial_step': initial_steps, + 'no_improve_break': 8, + 'no_improve_thr': target_cost/10, + 'minimize': True, + 'maxiter': max_iterations} + + nested_MC.set_adaptive_function_parameters(ad_func_pars) + if use_adaptive_optimizer: + nested_MC.set_optimization_method('adaptive') + else: + nested_MC.set_optimization_method('nelder_mead') + + optimizer_result = nested_MC.run( + f"Depletion_tuneup_{self.name}_adaptive-{use_adaptive_optimizer}", + mode='adaptive') + a = ma.OptimizationAnalysis(label='Depletion_tuneup') + + return a.optimization_result, optimizer_result + + def measure_error_fraction(self, MC=None, analyze: bool = True, + nr_shots: int = 2048*4, + sequence_type='echo', prepare: bool = True, + feedback=False, + depletion_time=None, net_gate='pi'): + """ + This performs a multiround experiment, the repetition rate is defined + by the ro_duration which can be changed by regenerating the + configuration file. + The analysis counts single errors. The definition of an error is + adapted automatically by choosing feedback or the net_gate. + it requires high SNR single shot readout and a calibrated threshold. 
+ """ + self.ro_acq_digitized(True) + if MC is None: + MC = self.instr_MC.get_instr() + + # plotting really slows down SSRO (16k shots plotting is slow) + old_plot_setting = MC.live_plot_enabled() + MC.live_plot_enabled(False) + MC.soft_avg(1) # don't want to average single shots + if prepare: + self.prepare_for_timedomain() + # off/on switching is achieved by turning the MW source on and + # off as this is much faster than recompiling/uploading + p = sqo.RTE( + qubit_idx=self.cfg_qubit_nr(), sequence_type=sequence_type, + platf_cfg=self.cfg_openql_platform_fn(), net_gate=net_gate, + feedback=feedback) + self.instr_CC.get_instr().eqasm_program(p.filename) + else: + p = None # object needs to exist for the openql_sweep to work + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr(), + parameter_name='shot nr', unit='#', + upload=prepare) + MC.set_sweep_function(s) + MC.set_sweep_points(np.arange(nr_shots)) + d = self.int_log_det + MC.set_detector_function(d) + + exp_metadata = {'feedback': feedback, 'sequence_type': sequence_type, + 'depletion_time': depletion_time, 'net_gate': net_gate} + suffix = 'depletion_time_{}_ro_pulse_{}_feedback_{}_net_gate_{}'.format( + depletion_time, self.ro_pulse_type(), feedback, net_gate) + MC.run( + 'RTE_{}_{}'.format(self.msmt_suffix, suffix), + exp_metadata=exp_metadata) + MC.live_plot_enabled(old_plot_setting) + if analyze: + a = ma2.Single_Qubit_RoundsToEvent_Analysis( + t_start=None, t_stop=None, + options_dict={'typ_data_idx': 0, + 'scan_label': 'RTE'}, + extract_only=True) + return {'error fraction': a.proc_data_dict['frac_single']} + + def measure_T1( + self, + times: List[float]=None, + prepare_for_timedomain=True, + update=True, + close_fig=True, + analyze=True, + disable_metadata: bool = False, + T1_VS_flux : bool = False, + MC=None, + ): + """ + N.B. this is a good example for a generic timedomain experiment using + the CCL transmon. 
+ + """ + + if MC is None: + MC = self.instr_MC.get_instr() + if times is None: + times = np.linspace(0, 4*self.T1(), 51) + dt = times[1] - times[0] + times = np.concatenate([times, (times[-1]+1*dt, + times[-1]+2*dt, + times[-1]+3*dt, + times[-1]+4*dt) ]) + if prepare_for_timedomain: + self.prepare_for_timedomain() + p = sqo.T1(qubit_idx=self.cfg_qubit_nr(), + T1_VS_flux = T1_VS_flux, + platf_cfg=self.cfg_openql_platform_fn(), + times=times) + s = swf.OpenQL_Sweep(openql_program=p, + parameter_name='Time', + unit='s', + CCL=self.instr_CC.get_instr()) + MC.set_sweep_function(s) + MC.set_sweep_points(times) + MC.set_detector_function(self.int_avg_det) + MC.run('T1'+self.msmt_suffix,disable_snapshot_metadata=disable_metadata) + if analyze: + a = ma.T1_Analysis(auto=True, close_fig=True) + if update: + self.T1(a.T1) + return a.T1 + + def measure_T1_2nd_excited_state(self, times=None, MC=None, + analyze=True, close_fig=True, update=True, + prepare_for_timedomain=True): + """ + Performs a T1 experiment on the 2nd excited state. 
+ """ + if MC is None: + MC = self.instr_MC.get_instr() + + # default timing + if times is None: + times = np.linspace(0, self.T1()*4, 31) + + if prepare_for_timedomain: + self.prepare_for_timedomain() + + # Load pulses to the ef transition + mw_lutman = self.instr_LutMan_MW.get_instr() + mw_lutman.load_ef_rabi_pulses_to_AWG_lookuptable() + + p = sqo.T1_second_excited_state(times, qubit_idx=self.cfg_qubit_nr(), + platf_cfg=self.cfg_openql_platform_fn()) + s = swf.OpenQL_Sweep(openql_program=p, + parameter_name='Time', + unit='s', + CCL=self.instr_CC.get_instr()) + d = self.int_avg_det + MC.set_sweep_function(s) + MC.set_sweep_points(p.sweep_points) + MC.set_detector_function(d) + MC.run('T1_2nd_exc_state_'+self.msmt_suffix) + a = ma.T1_Analysis(auto=True, close_fig=True) + return a.T1 + + def measure_ramsey( + self, + times: List[float]=None, + artificial_detuning: float=None, + freq_qubit: float=None, + label: str='', + MC=None, + prepare_for_timedomain=True, + T2_VS_flux : bool = False, + analyze=True, + update=True, + double_fit=False, + test_beating=True, + close_fig=True, + disable_metadata=False, + ): + """ + # docstring from parent class + # N.B. this is a good example for a generic timedomain experiment using + # the CCL transmon. 
+ + """ + if MC is None: + MC = self.instr_MC.get_instr() + + if times is None: + # funny default is because there is no real time sideband modulation + stepsize = max( (self.T2_star()*4/61)//abs(self.cfg_cycle_time())*abs(self.cfg_cycle_time()), + 40e-9) + # default timing: 4 x current T2* + times = np.arange(0, self.T2_star()*4, stepsize) + + # append the calibration points, times are for location in plot + dt = times[1] - times[0] + times = np.concatenate([times, (times[-1]+1*dt, + times[-1]+2*dt, + times[-1]+3*dt, + times[-1]+4*dt)]) + + if prepare_for_timedomain: + self.prepare_for_timedomain() + + if freq_qubit is None: + freq_qubit = self.freq_qubit() + + # adding 'artificial' detuning by detuning the qubit LO + # this should have no effect if artificial detuning = 0. + # NOTE: This is actually real detuning (added artificially), not artificial detuning + if artificial_detuning is None: + artificial_detuning = 5/times[-1] + old_frequency = self.instr_LO_mw.get_instr().get('frequency') + self.instr_LO_mw.get_instr().set( + 'frequency', freq_qubit - + self.mw_freq_mod.get() + artificial_detuning) + + p = sqo.Ramsey(qubit_idx=self.cfg_qubit_nr(), + platf_cfg=self.cfg_openql_platform_fn(), + times=times, + T2_VS_flux=T2_VS_flux) + + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr(), + parameter_name='Time', unit='s') + + MC.set_sweep_function(s) + MC.set_sweep_points(times) + MC.set_detector_function(self.int_avg_det) + MC.run('Ramsey'+label+self.msmt_suffix, + disable_snapshot_metadata=disable_metadata) + + # Restore old frequency value + self.instr_LO_mw.get_instr().set('frequency', old_frequency) + + if analyze: + a = ma.Ramsey_Analysis(auto=True, close_fig=True, + freq_qubit=freq_qubit, + artificial_detuning=artificial_detuning) + if test_beating and a.fit_res.chisqr > 0.4: + log.warning('Found double frequency in Ramsey: large ' + 'deviation found in single frequency fit.' 
+ 'Trying double frequency fit.') + double_fit = True + if update: + self.T2_star(a.T2_star['T2_star']) + if double_fit: + b = ma.DoubleFrequency() + res = { + 'T2star1': b.tau1, + 'T2star2': b.tau2, + 'frequency1': b.f1, + 'frequency2': b.f2 + } + return res + + else: + res = { + 'T2star': a.T2_star['T2_star'], + 'frequency': a.qubit_frequency, + } + return res + + def measure_complex_ramsey(self, times=None, MC=None, + freq_qubit: float = None, + label: str = '', + prepare_for_timedomain=True, + analyze=True, close_fig=True, update=True, + detector=False, + double_fit=False, + test_beating=True): + # docstring from parent class + # N.B. this is a good example for a generic timedomain experiment using + # the CCL transmon. + if MC is None: + MC = self.instr_MC.get_instr() + + # readout must use IQ data + old_ro_type = self.ro_acq_weight_type() + self.ro_acq_weight_type('optimal IQ') + + # default timing + if times is None: + # funny default is because there is no real time sideband + # modulation + stepsize = max((self.T2_star()*4/61)//(abs(self.cfg_cycle_time())) + * abs(self.cfg_cycle_time()), 40e-9) + times = np.arange(0, self.T2_star()*4, stepsize) + + # append the calibration points, times are for location in plot + dt = times[1] - times[0] + times = np.concatenate([np.repeat(times,2), + (times[-1]+1*dt, + times[-1]+2*dt, + times[-1]+3*dt, + times[-1]+4*dt)]) + + if prepare_for_timedomain: + self.prepare_for_timedomain() + + # adding 'artificial' detuning by detuning the qubit LO + if freq_qubit is None: + freq_qubit = self.freq_qubit() + # # this should have no effect if artificial detuning = 0. 
This is a bug, + # This is real detuning, not artificial detuning + + + p = sqo.complex_Ramsey(times, qubit_idx=self.cfg_qubit_nr(), + platf_cfg=self.cfg_openql_platform_fn()) + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr(), + parameter_name='Time', unit='s') + MC.set_sweep_function(s) + MC.set_sweep_points(times) + + d = self.int_avg_det + MC.set_detector_function(d) + + MC.run('complex_Ramsey'+label+self.msmt_suffix) + self.ro_acq_weight_type(old_ro_type) + + if analyze: + a = ma2.ComplexRamseyAnalysis(label='complex_Ramsey', close_figs=True) + if update: + fit_res = a.fit_dicts['exp_fit']['fit_res'] + fit_frequency = fit_res.params['frequency'].value + freq_qubit = self.freq_qubit() + self.freq_qubit(freq_qubit + fit_frequency) + # if test_beating and a.fit_res.chisqr > 0.4: + # log.warning('Found double frequency in Ramsey: large ' + # 'deviation found in single frequency fit.' + # 'Trying double frequency fit.') + # double_fit = True + # if update: + # self.T2_star(a.T2_star['T2_star']) + # if double_fit: + # b = ma.DoubleFrequency() + # res = { + # 'T2star1': b.tau1, + # 'T2star2': b.tau2, + # 'frequency1': b.f1, + # 'frequency2': b.f2 + # } + # return res + + # else: + # res = { + # 'T2star': a.T2_star['T2_star'], + # 'frequency': a.qubit_frequency, + # } + # return res + + def measure_msmt_induced_dephasing(self, MC=None, sequence='ramsey', + label: str = '', + verbose: bool = True, + analyze: bool = True, + close_fig: bool = True, + update: bool = True, + cross_target_qubits: list = None, + multi_qubit_platf_cfg=None, + target_qubit_excited=False, + extra_echo=False): + # docstring from parent class + + # Refs: + # Schuster PRL 94, 123602 (2005) + # Gambetta PRA 74, 042318 (2006) + if MC is None: + MC = self.instr_MC.get_instr() + if cross_target_qubits is None: + platf_cfg = self.cfg_openql_platform_fn() + else: + platf_cfg = multi_qubit_platf_cfg + + self.prepare_for_timedomain() + 
self.instr_LutMan_MW.get_instr().load_phase_pulses_to_AWG_lookuptable() + if cross_target_qubits is None: + qubits = [self.cfg_qubit_nr()] + else: + qubits = [] + for cross_target_qubit in cross_target_qubits: + qubits.append(cross_target_qubit.cfg_qubit_nr()) + qubits.append(self.cfg_qubit_nr()) + + # angles = np.arange(0, 421, 20) + angles = np.concatenate( + [np.arange(0, 101, 20), np.arange(140, 421, 20)]) # avoid CW15, issue + + if sequence == 'ramsey': + readout_pulse_length = self.ro_pulse_length() + readout_pulse_length += self.ro_pulse_down_length0() + readout_pulse_length += self.ro_pulse_down_length1() + if extra_echo: + wait_time = readout_pulse_length/2+0e-9 + else: + wait_time = 0 + + p = mqo.Ramsey_msmt_induced_dephasing(qubits=qubits, angles=angles, + platf_cfg=platf_cfg, + target_qubit_excited=target_qubit_excited, + extra_echo=extra_echo, + wait_time=wait_time) + elif sequence == 'echo': + readout_pulse_length = self.ro_pulse_length() + readout_pulse_length += self.ro_pulse_down_length0() + readout_pulse_length += self.ro_pulse_down_length1() + if extra_echo: + wait_time = readout_pulse_length/2+20e-9 + else: + wait_time = readout_pulse_length+40e-9 + p = mqo.echo_msmt_induced_dephasing(qubits=qubits, angles=angles, + platf_cfg=platf_cfg, + wait_time=wait_time, + target_qubit_excited=target_qubit_excited, + extra_echo=extra_echo) + else: + raise ValueError('sequence must be set to ramsey or echo') + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr(), + parameter_name='angle', unit='degree') + MC.set_sweep_function(s) + MC.set_sweep_points(angles) + d = self.int_avg_det + MC.set_detector_function(d) + MC.run(sequence+label+self.msmt_suffix) + if analyze: + a = ma.Ramsey_Analysis(label=sequence, auto=True, close_fig=True, + freq_qubit=self.freq_qubit(), + artificial_detuning=0, # fixme + phase_sweep_only=True) + phase_deg = (a.fit_res.params['phase'].value)*360/(2*np.pi) % 360 + res = { + 'coherence': 
a.fit_res.params['amplitude'].value, + 'phase': phase_deg, + } + if verbose: + print('> ramsey analyse', res) + return res + # else: + # return {'coherence': -1, + # 'phase' : -1} + + def measure_echo(self, times=None, MC=None, + analyze=True, close_fig=True, update=True, + disable_metadata: bool = False, + label: str = '', prepare_for_timedomain=True): + # docstring from parent class + # N.B. this is a good example for a generic timedomain experiment using + # the CCL transmon. + if MC is None: + MC = self.instr_MC.get_instr() + + # default timing + if times is None: + # funny default is because there is no real time sideband + # modulation + stepsize = max((self.T2_echo()*2/61)//(abs(self.cfg_cycle_time())) + * abs(self.cfg_cycle_time()), 20e-9) + times = np.arange(0, self.T2_echo()*4, stepsize*2) + + # append the calibration points, times are for location in plot + dt = times[1] - times[0] + times = np.concatenate([times, + (times[-1]+1*dt, + times[-1]+2*dt, + times[-1]+3*dt, + times[-1]+4*dt)]) + + mw_lutman = self.instr_LutMan_MW.get_instr() + # # Checking if pulses are on 20 ns grid + if not all([np.round(t*1e9) % (2*self.cfg_cycle_time()*1e9) == 0 for + t in times]): + raise ValueError('timesteps must be multiples of 40e-9') + + # # Checking if pulses are locked to the pulse modulation + if not all([np.round(t/1*1e9) % (2/self.mw_freq_mod.get()*1e9) == 0 for t in times]) and\ + mw_lutman.cfg_sideband_mode() != 'real-time': + raise ValueError( + 'timesteps must be multiples of 2 modulation periods') + + if prepare_for_timedomain: + self.prepare_for_timedomain() + mw_lutman.load_phase_pulses_to_AWG_lookuptable() + p = sqo.echo(times, qubit_idx=self.cfg_qubit_nr(), + platf_cfg=self.cfg_openql_platform_fn()) + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr(), + parameter_name="Time", unit="s") + d = self.int_avg_det + MC.set_sweep_function(s) + MC.set_sweep_points(times) + MC.set_detector_function(d) + MC.run('echo'+label+self.msmt_suffix, 
+ disable_snapshot_metadata=disable_metadata) + if analyze: + # N.B. v1.5 analysis + a = ma.Echo_analysis_V15(label='echo', auto=True, close_fig=True) + if update: + self.T2_echo(a.fit_res.params['tau'].value) + return a + + def measure_CPMG(self, times=None, orders=None, MC=None, sweep='tau', + analyze=True, close_fig=True, update=False, + label: str = '', prepare_for_timedomain=True): + # docstring from parent class + # N.B. this is a good example for a generic timedomain experiment using + # the CCL transmon. + if MC is None: + MC = self.instr_MC.get_instr() + + + # default timing + if times is None and sweep == 'tau': + # funny default is because there is no real time sideband + # modulation + stepsize = max((self.T2_echo()*2/61)//(abs(self.cfg_cycle_time())) + * abs(self.cfg_cycle_time()), 20e-9) + times = np.arange(0, self.T2_echo()*4, stepsize*2) + + if orders is None and sweep == 'tau': + orders = 2 + if orders<1 and sweep =='tau': + raise ValueError( + 'Orders must be larger than 1') + + + + + # append the calibration points, times are for location in plot + if sweep == 'tau': + dt = times[1] - times[0] + times = np.concatenate([times, + (times[-1]+1*dt, + times[-1]+2*dt, + times[-1]+3*dt, + times[-1]+4*dt)]) + elif sweep == 'order': + dn = orders[1] - orders[0] + orders = np.concatenate([orders, + (orders[-1]+1*dn, + orders[-1]+2*dn, + orders[-1]+3*dn, + orders[-1]+4*dn)]) + # # Checking if pulses are on 20 ns grid + if sweep == 'tau': + if not all([np.round((t*1e9)/(2*orders)) % (self.cfg_cycle_time()*1e9) == 0 for + t in times]): + raise ValueError('timesteps must be multiples of 40e-9') + elif sweep == 'order': + if not np.round(times/2) % (self.cfg_cycle_time()*1e9) == 0: + raise ValueError('timesteps must be multiples of 40e-9') + + # # Checking if pulses are locked to the pulse modulation + if sweep == 'tau': + if not all([np.round(t/1*1e9) % (2/self.mw_freq_mod.get()*1e9) + == 0 for t in times]): + raise ValueError( + 'timesteps must be multiples 
of 2 modulation periods') + + if prepare_for_timedomain: + self.prepare_for_timedomain() + mw_lutman = self.instr_LutMan_MW.get_instr() + mw_lutman.load_phase_pulses_to_AWG_lookuptable() + if sweep == 'tau': + print(times) + p = sqo.CPMG(times, orders, qubit_idx=self.cfg_qubit_nr(), + platf_cfg=self.cfg_openql_platform_fn()) + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr(), + parameter_name="Time", unit="s") + elif sweep == 'order': + p = sqo.CPMG_SO(times, orders, qubit_idx=self.cfg_qubit_nr(), + platf_cfg=self.cfg_openql_platform_fn()) + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr(), + parameter_name="Order", unit="") + d = self.int_avg_det + MC.set_sweep_function(s) + if sweep == 'tau': + MC.set_sweep_points(times) + elif sweep == 'order': + MC.set_sweep_points(orders) + MC.set_detector_function(d) + if sweep == 'tau': + msmt_title = 'CPMG_order_'+str(orders)+label+self.msmt_suffix + elif sweep == 'order': + msmt_title = 'CPMG_tauN_'+str(times)+label+self.msmt_suffix + MC.run(msmt_title) + if analyze: + # N.B. v1.5 analysis + if sweep == 'tau': + a = ma.Echo_analysis_V15(label='CPMG', auto=True, close_fig=True) + if update: + self.T2_echo(a.fit_res.params['tau'].value) + elif sweep == 'order': + a = ma2.Single_Qubit_TimeDomainAnalysis(label='CPMG', auto=True, close_fig=True) + + return a + + def measure_spin_locking_simple(self, times=None, MC=None, + analyze=True, close_fig=True, update=True, + label: str = '', prepare_for_timedomain=True, + tomo=False): + # docstring from parent class + # N.B. this is a good example for a generic timedomain experiment using + # the CCL transmon. 
+ if MC is None: + MC = self.instr_MC.get_instr() + + # default timing + if times is None: + # funny default is because there is no real time sideband + # modulation + stepsize = max((self.T2_echo()*2/61)//(abs(self.cfg_cycle_time())) + * abs(self.cfg_cycle_time()), 20e-9) + times = np.arange(0, self.T2_echo()*4, stepsize*2) + + # append the calibration points, times are for location in plot + dt = times[1] - times[0] + if tomo: + times = np.concatenate([np.repeat(times,3), + (times[-1]+1*dt, + times[-1]+2*dt, + times[-1]+3*dt, + times[-1]+4*dt, + times[-1]+5*dt, + times[-1]+6*dt)]) + else: + times = np.concatenate([times, + (times[-1]+1*dt, + times[-1]+2*dt, + times[-1]+3*dt, + times[-1]+4*dt)]) + + # # Checking if pulses are on 20 ns grid + if not all([np.round(t*1e9) % (self.cfg_cycle_time()*1e9) == 0 for + t in times]): + raise ValueError('timesteps must be multiples of 20e-9') + + # # Checking if pulses are locked to the pulse modulation + if not all([np.round(t/1*1e9) % (2/self.mw_freq_mod.get()*1e9) + == 0 for t in times]): + raise ValueError( + 'timesteps must be multiples of 2 modulation periods') + + if prepare_for_timedomain: + self.prepare_for_timedomain() + mw_lutman = self.instr_LutMan_MW.get_instr() + mw_lutman.load_square_waves_to_AWG_lookuptable() + p = sqo.spin_lock_simple(times, qubit_idx=self.cfg_qubit_nr(), + platf_cfg=self.cfg_openql_platform_fn(), tomo=tomo) + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr(), + parameter_name="Time", unit="s") + d = self.int_avg_det + MC.set_sweep_function(s) + MC.set_sweep_points(times) + MC.set_detector_function(d) + MC.run('spin_lock_simple'+label+self.msmt_suffix) + + if analyze: + a = ma.T1_Analysis(label='spin_lock_simple', auto=True, close_fig=True) + return a + + def measure_spin_locking_echo(self, times=None, MC=None, + analyze=True, close_fig=True, update=True, + label: str = '', prepare_for_timedomain=True): + # docstring from parent class + # N.B. 
this is a good example for a generic timedomain experiment using + # the CCL transmon. + if MC is None: + MC = self.instr_MC.get_instr() + + # default timing + if times is None: + # funny default is because there is no real time sideband + # modulation + stepsize = max((self.T2_echo()*2/61)//(abs(self.cfg_cycle_time())) + * abs(self.cfg_cycle_time()), 20e-9) + times = np.arange(0, self.T2_echo()*4, stepsize*2) + + # append the calibration points, times are for location in plot + dt = times[1] - times[0] + times = np.concatenate([times, + (times[-1]+1*dt, + times[-1]+2*dt, + times[-1]+3*dt, + times[-1]+4*dt)]) + + # # Checking if pulses are on 20 ns grid + if not all([np.round(t*1e9) % (self.cfg_cycle_time()*1e9) == 0 for + t in times]): + raise ValueError('timesteps must be multiples of 20e-9') + + # # Checking if pulses are locked to the pulse modulation + if not all([np.round(t/1*1e9) % (2/self.mw_freq_mod.get()*1e9) + == 0 for t in times]): + raise ValueError( + 'timesteps must be multiples of 2 modulation periods') + + if prepare_for_timedomain: + self.prepare_for_timedomain() + mw_lutman = self.instr_LutMan_MW.get_instr() + mw_lutman.load_square_waves_to_AWG_lookuptable() + p = sqo.spin_lock_echo(times, qubit_idx=self.cfg_qubit_nr(), + platf_cfg=self.cfg_openql_platform_fn()) + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr(), + parameter_name="Time", unit="s") + d = self.int_avg_det + MC.set_sweep_function(s) + MC.set_sweep_points(times) + MC.set_detector_function(d) + MC.run('spin_lock_echo'+label+self.msmt_suffix) + + if analyze: + a = ma.T1_Analysis(label='spin_lock_echo', auto=True, close_fig=True) + return a + + def measure_rabi_frequency(self, times=None, MC=None, + analyze=True, close_fig=True, update=True, + label: str = '', prepare_for_timedomain=True, + tomo=False): + # docstring from parent class + # N.B. this is a good example for a generic timedomain experiment using + # the CCL transmon. 
+ if MC is None: + MC = self.instr_MC.get_instr() + + # default timing + if times is None: + # funny default is because there is no real time sideband + # modulation + stepsize = max((self.T2_echo()*2/61)//(abs(self.cfg_cycle_time())) + * abs(self.cfg_cycle_time()), 40e-9) + times = np.arange(0, self.T2_echo()*4, stepsize*2) + + # append the calibration points, times are for location in plot + dt = times[1] - times[0] + if tomo: + times = np.concatenate([np.repeat(times,3), + (times[-1]+1*dt, + times[-1]+2*dt, + times[-1]+3*dt, + times[-1]+4*dt, + times[-1]+5*dt, + times[-1]+6*dt)]) + else: + times = np.concatenate([times, + (times[-1]+1*dt, + times[-1]+2*dt, + times[-1]+3*dt, + times[-1]+4*dt)]) + + # # # Checking if pulses are on 20 ns grid + # if not all([np.round(t*1e9) % (self.cfg_cycle_time()*1e9) == 0 for + # t in times]): + # raise ValueError('timesteps must be multiples of 40e-9') + + # # # Checking if pulses are locked to the pulse modulation + # if not all([np.round(t/1*1e9) % (2/self.mw_freq_mod.get()*1e9) + # == 0 for t in times]): + # raise ValueError( + # 'timesteps must be multiples of 2 modulation periods') + + if prepare_for_timedomain: + self.prepare_for_timedomain() + mw_lutman = self.instr_LutMan_MW.get_instr() + mw_lutman.load_square_waves_to_AWG_lookuptable() + p = sqo.rabi_frequency(times, qubit_idx=self.cfg_qubit_nr(), + platf_cfg=self.cfg_openql_platform_fn(), tomo=tomo) + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr(), + parameter_name="Time", unit="s") + d = self.int_avg_det + MC.set_sweep_function(s) + MC.set_sweep_points(times) + MC.set_detector_function(d) + MC.run('rabi_frequency'+label+self.msmt_suffix) + + if analyze: + a = ma.Echo_analysis_V15(label='rabi_frequency', auto=True, close_fig=True) + return a + + def measure_flipping( + self, + number_of_flips=np.arange(0, 61, 2), + equator=True, + prepare_for_timedomain=True, + MC=None, + analyze=True, + close_fig=True, + update=False, + flip_ef=False, + 
flip_fh=False,
+ ax='x',
+ angle='180',
+ label='',
+ disable_metadata=False):
+ """
+ Measurement for fine-tuning of the pi and pi/2 pulse amplitudes. Executes sequence
+ pi (repeated N-times) - pi/2 - measure
+ with variable number N. In this way the error in the amplitude of the MW pi pulse
+ accumulates allowing for fine tuning. Alternatively N repetitions of the pi pulse
+ can be replaced by 2N repetitions of the pi/2-pulse
+
+ Args:
+ number_of_flips (array):
+ number of pi pulses to apply. It is recommended to use only even numbers,
+ since then the expected signal has a sine shape. Otherwise it has -1^N * sin shape
+ which will not be correctly analyzed.
+
+ equator (bool):
+ specify whether to apply the final pi/2 pulse. Setting to False makes the sequence
+ first-order insensitive to pi-pulse amplitude errors.
+
+ ax (str {'x', 'y'}):
+ axis around which the pi pulses are to be performed. Possible values 'x' or 'y'
+
+ angle (str {'90', '180'}):
+ specifies whether to apply pi or pi/2 pulses. Possible values: '180' or '90'
+
+ update (bool):
+ specifies whether to update parameter controlling MW pulse amplitude.
+ This parameter is mw_vsm_G_amp in VSM case or mw_channel_amp in no-VSM case.
+ Update is performed only if change by more than 0.2% (0.36 deg) is needed.
+ """ + + if MC is None: + MC = self.instr_MC.get_instr() + + # allow flipping only with pi/2 or pi, and x or y pulses + assert angle in ['90','180'] + assert ax.lower() in ['x', 'y'] + + # append the calibration points, times are for location in plot + nf = np.array(number_of_flips) + dn = nf[1] - nf[0] + nf = np.concatenate([nf, + (nf[-1]+1*dn, + nf[-1]+2*dn, + nf[-1]+3*dn, + nf[-1]+4*dn) ]) + if prepare_for_timedomain: + self.prepare_for_timedomain() + p = sqo.flipping(number_of_flips=nf, + equator=equator, + flip_ef=flip_ef, + flip_fh=flip_fh, + qubit_idx=self.cfg_qubit_nr(), + platf_cfg=self.cfg_openql_platform_fn(), + ax=ax.lower(), angle=angle) + s = swf.OpenQL_Sweep(openql_program=p, + unit='#', + CCL=self.instr_CC.get_instr()) + d = self.int_avg_det + MC.set_sweep_function(s) + MC.set_sweep_points(nf) + MC.set_detector_function(d) + if flip_ef: + label = 'ef_rx12' + elif flip_fh: + label = 'fh_rx23' + MC.run('flipping_'+ax+angle+label+self.msmt_suffix, + disable_snapshot_metadata=disable_metadata) + if analyze: + a = ma2.FlippingAnalysis( + options_dict={'scan_label': 'flipping'}) + + if update: + # choose scale factor based on simple goodness-of-fit comparison + # This method gives priority to the line fit: + # the cos fit will only be chosen if its chi^2 relative to the + # chi^2 of the line fit is at least 10% smaller + if (a.fit_res['line_fit'].chisqr - a.fit_res['cos_fit'].chisqr)/a.fit_res['line_fit'].chisqr \ + > 0.1: + scale_factor = a._get_scale_factor_cos() + else: + scale_factor = a._get_scale_factor_line() + + if abs(scale_factor-1) < 1e-3: + print('Pulse amplitude accurate within 0.1%. 
Amplitude not updated.')
+ return True
+
+ if angle == '180':
+ if flip_ef:
+ amp_old = self.mw_ef_amp()
+ if scale_factor*amp_old > 1:
+ self.mw_ef_amp(1)
+ else:
+ self.mw_ef_amp(scale_factor*amp_old)
+ elif flip_fh:
+ amp_old = self.mw_fh_amp()
+ self.mw_fh_amp(scale_factor*amp_old)
+ else:
+ amp_old = self.mw_channel_amp()
+ self.mw_channel_amp(scale_factor*amp_old)
+
+
+ elif angle == '90':
+ amp_old = self.mw_amp90_scale()
+ self.mw_amp90_scale(scale_factor*amp_old)
+
+ print('Pulse amplitude for {}-{} pulse changed from {:.3f} to {:.3f}'.format(
+ ax, angle, amp_old, scale_factor*amp_old))
+
+ return a
+
+ def flipping_GBT(self, nr_sequence: int = 2):
+ '''
+ This function is to measure flipping sequence for whatever nr_of times
+ a function needs to be run to calibrate the Pi and Pi/2 Pulse.
+ Right now this method will always return true no matter what
+ Later we can add a condition as a check.
+ '''
+ for i in range(nr_sequence):
+ a = self.measure_flipping(update=True)
+ scale_factor = a._get_scale_factor_line()
+ if abs(1-scale_factor)<0.0005:
+ return True
+ else:
+ return False
+
+ def measure_motzoi(self, motzoi_amps=None,
+ prepare_for_timedomain: bool = True,
+ MC=None, analyze=True, close_fig=True,
+ disable_metadata=False):
+ """
+ Sweeps the amplitude of the DRAG coefficients looking for leakage reduction
+ and optimal correction for the phase error due to stark shift resulting
+ from transition to higher qubit states. In this measurement the two-pulse
+ sequences are applied:
+ X180-Y90 and Y180-X90 and the amplitude of the gaussian-derivative component
+ of the MW pulse is swept. When the DRAG coefficient is adjusted correctly
+ the two sequences yield the same result.
+
+ Refs:
+ Motzoi PRL 103, 110501 (2009)
+ Chow PRA 82, 040305(R) (2010)
+ Lucero PRA 82, 042339 (2010)
+
+ Args:
+ motzoi_amps (array):
+ DRAG coefficients to sweep over.
In VSM case the amplitude + is adjusted by varying attenuation of the derivative channel for the + relevant module. In no-VSM the DRAG parameter is adjusted by reloading + of the waveform on the AWG. + + Returns: + float: + value of the DRAG parameter for which the two sequences yield the same result + error is mimimized. + """ + using_VSM = self.cfg_with_vsm() + MW_LutMan = self.instr_LutMan_MW.get_instr() + AWG = MW_LutMan.AWG.get_instr() + + if MC is None: + MC = self.instr_MC.get_instr() + if prepare_for_timedomain: + self.prepare_for_timedomain() + p = sqo.motzoi_XY( + qubit_idx=self.cfg_qubit_nr(), + platf_cfg=self.cfg_openql_platform_fn()) + self.instr_CC.get_instr().eqasm_program(p.filename) + + d = self.get_int_avg_det(single_int_avg=True, values_per_point=2, + values_per_point_suffex=['yX', 'xY'], + always_prepare=True) + + if using_VSM: + VSM = self.instr_VSM.get_instr() + if motzoi_amps is None: + motzoi_amps = np.linspace(0.1, 1.0, 31) + mod_out = self.mw_vsm_mod_out() + ch_in = self.mw_vsm_ch_in() + D_par = VSM.parameters['mod{}_ch{}_derivative_amp'.format( + mod_out, ch_in)] + swf_func = wrap_par_to_swf(D_par, retrieve_value=True) + else: + if self._using_QWG(): + if motzoi_amps is None: + motzoi_amps = np.linspace(-.3, .3, 31) + swf_func = swf.QWG_lutman_par(LutMan=MW_LutMan, + LutMan_parameter=MW_LutMan.mw_motzoi) + else: + if motzoi_amps is None: + motzoi_amps = np.linspace(-.3, .3, 31) + swf_func = swf.lutman_par(LutMan=MW_LutMan, + LutMan_parameter=MW_LutMan.mw_motzoi) + + MC.set_sweep_function(swf_func) + MC.set_sweep_points(motzoi_amps) + MC.set_detector_function(d) + + MC.run('Motzoi_XY'+self.msmt_suffix, + disable_snapshot_metadata=disable_metadata) + if analyze: + if self.ro_acq_weight_type() == 'optimal': + a = ma2.Intersect_Analysis( + options_dict={'ch_idx_A': 0, + 'ch_idx_B': 1}, + normalized_probability=True) + else: + # if statement required if 2 channels readout + log.warning( + 'It is recommended to do this with optimal weights') + 
a = ma2.Intersect_Analysis( + options_dict={'ch_idx_A': 0, + 'ch_idx_B': 1}, + normalized_probability=False) + return a + + def measure_single_qubit_randomized_benchmarking( + self, nr_cliffords=2**np.arange(12), + nr_seeds=100, + MC=None, + recompile: bool = 'as needed', + prepare_for_timedomain: bool = True, + ignore_f_cal_pts: bool = False, + compile_only: bool = False, + rb_tasks=None, + disable_metadata=False): + """ + Measures randomized benchmarking decay including second excited state + population. + + For this it: + - stores single shots using SSB weights (int. logging) + - uploads a pulse driving the ef/12 transition (should be calibr.) + - performs RB both with and without an extra pi-pulse + - Includes calibration poitns for 0, 1, and 2 (g,e, and f) + - analysis extracts fidelity and leakage/seepage + + Refs: + Knill PRA 77, 012307 (2008) + Wood PRA 97, 032306 (2018) + + Args: + nr_cliffords (array): + list of lengths of the clifford gate sequences + + nr_seeds (int): + number of random sequences for each sequence length + + recompile (bool, str {'as needed'}): + indicate whether to regenerate the sequences of clifford gates. 
+ By default it checks whether the needed sequences were already + generated since the most recent change of OpenQL file + specified in self.cfg_openql_platform_fn + """ + + # because only 1 seed is uploaded each time + if MC is None: + MC = self.instr_MC.get_instr() + + assert 'IQ' in self.ro_acq_weight_type() + assert self.ro_acq_digitized()==False + + if prepare_for_timedomain: + self.prepare_for_timedomain() + MC.soft_avg(1) + + net_cliffords = [0, 3] # always measure double sided + + def send_rb_tasks(pool_): + tasks_inputs = [] + for i in range(nr_seeds): + task_dict = dict( + qubits=[self.cfg_qubit_nr()], + nr_cliffords=nr_cliffords, + net_cliffords=net_cliffords, # always measure double sided + nr_seeds=1, + platf_cfg=self.cfg_openql_platform_fn(), + program_name='RB_s{}_ncl{}_net{}_{}'.format( + i, nr_cliffords, net_cliffords, self.name), + recompile=recompile + ) + tasks_inputs.append(task_dict) + # pool.starmap_async can be used for positional arguments + # but we are using a wrapper + rb_tasks = pool_.map_async(cl_oql.parallel_friendly_rb, tasks_inputs) + + return rb_tasks + + if compile_only: + assert pool is not None + rb_tasks = send_rb_tasks(pool) + return rb_tasks + + if rb_tasks is None: + # Using `with ...:` makes sure the other processes will be terminated + # avoid starting too mane processes, + # nr_processes = None will start as many as the PC can handle + nr_processes = None if recompile else 1 + with multiprocessing.Pool(nr_processes) as pool: + rb_tasks = send_rb_tasks(pool) + cl_oql.wait_for_rb_tasks(rb_tasks) + + print(rb_tasks) + programs_filenames = rb_tasks.get() + + counter_param = ManualParameter('name_ctr', initial_value=0) + prepare_function_kwargs = { + 'counter_param': counter_param, + 'programs_filenames': programs_filenames, + 'CC': self.instr_CC.get_instr()} + + # to include calibration points + sweep_points = np.append( + # repeat twice because of net clifford being 0 and 3 + np.repeat(nr_cliffords, 2), + [nr_cliffords[-1] + 
.5] * 2 + [nr_cliffords[-1] + 1.5] * 2 + + [nr_cliffords[-1] + 2.5] * 2, + ) + + d = self.int_log_det + d.prepare_function = load_range_of_oql_programs_from_filenames + d.prepare_function_kwargs = prepare_function_kwargs + reps_per_seed = 2**13 // len(sweep_points) + d.nr_shots = reps_per_seed * len(sweep_points) + + s = swf.None_Sweep(parameter_name='Number of Cliffords', unit='#') + + MC.set_sweep_function(s) + MC.set_sweep_points(np.tile(sweep_points, reps_per_seed * nr_seeds)) + MC.set_detector_function(d) + MC.run('RB_{}seeds'.format(nr_seeds) + self.msmt_suffix, + exp_metadata={'bins': sweep_points}, + disable_snapshot_metadata=disable_metadata) + + a = ma2.RandomizedBenchmarking_SingleQubit_Analysis( + label='RB_', + rates_I_quad_ch_idx=0, + cal_pnts_in_dset=np.repeat(["0", "1", "2"], 2)) + return a + + def measure_randomized_benchmarking_old(self, nr_cliffords=2**np.arange(12), + nr_seeds=100, + double_curves=False, + MC=None, analyze=True, close_fig=True, + verbose: bool = True, upload=True, + update=True): + # Old version not including two-state calibration points and logging + # detector. 
+ # Adding calibration points + if double_curves: + nr_cliffords = np.repeat(nr_cliffords, 2) + nr_cliffords = np.append( + nr_cliffords, [nr_cliffords[-1]+.5]*2 + [nr_cliffords[-1]+1.5]*2) + self.prepare_for_timedomain() + if MC is None: + MC = self.instr_MC.get_instr() + MC.soft_avg(nr_seeds) + counter_param = ManualParameter('name_ctr', initial_value=0) + programs = [] + if verbose: + print('Generating {} RB programs'.format(nr_seeds)) + t0 = time.time() + for i in range(nr_seeds): + p = sqo.randomized_benchmarking( + qubit_idx=self.cfg_qubit_nr(), + nr_cliffords=nr_cliffords, + platf_cfg=self.cfg_openql_platform_fn(), + nr_seeds=1, program_name='RB_{}'.format(i), + double_curves=double_curves) + programs.append(p) + if verbose: + print('Succesfully generated {} RB programs in {:.1f}s'.format( + nr_seeds, time.time()-t0)) + + prepare_function_kwargs = { + 'counter_param': counter_param, + 'programs': programs, + 'CC': self.instr_CC.get_instr()} + + d = self.int_avg_det + d.prepare_function = load_range_of_oql_programs + d.prepare_function_kwargs = prepare_function_kwargs + d.nr_averages = 128 + + s = swf.None_Sweep() + s.parameter_name = 'Number of Cliffords' + s.unit = '#' + MC.set_sweep_function(s) + MC.set_sweep_points(nr_cliffords) + + MC.set_detector_function(d) + MC.run('RB_{}seeds'.format(nr_seeds)+self.msmt_suffix) + if double_curves: + a = ma.RB_double_curve_Analysis( + T1=self.T1(), + pulse_delay=self.mw_gauss_width.get()*4) + else: + a = ma.RandomizedBenchmarking_Analysis( + close_main_fig=close_fig, T1=self.T1(), + pulse_delay=self.mw_gauss_width.get()*4) + if update: + self.F_RB(a.fit_res.params['fidelity_per_Clifford'].value) + return a.fit_res.params['fidelity_per_Clifford'].value + + def measure_ef_rabi_2D(self, + amps: list = np.linspace(0, .8, 18), + anharmonicity: list = np.arange(-275e6,-326e6,-5e6), + recovery_pulse: bool = True, + measure_3rd_state: bool = False, + MC=None, label: str = '', + analyze=True, close_fig=True, + 
prepare_for_timedomain=True): + """ + Measures a rabi oscillation of the ef/12 transition. + Can also be used for fh/23 transition with flag + (This requires a calibrated 12 pulse). + + Modulation frequency of the "ef" pusles is controlled through the + `anharmonicity` parameter of the qubit object. + Hint: the expected pi-pulse amplitude of the ef/12 transition is ~1/2 + the pi-pulse amplitude of the ge/01 transition. + """ + if MC is None: + MC = self.instr_MC.get_instr() + if prepare_for_timedomain: + self.prepare_for_timedomain() + + mw_lutman = self.instr_LutMan_MW.get_instr() + mw_lutman.load_ef_rabi_pulses_to_AWG_lookuptable(amps=amps) + + p = sqo.ef_rabi_seq( + self.cfg_qubit_nr(), + amps=amps, recovery_pulse=recovery_pulse, + measure_3rd_state=measure_3rd_state, + platf_cfg=self.cfg_openql_platform_fn()) + + s = swf.OpenQL_Sweep(openql_program=p, + parameter_name='Pulse amp', + unit='dac', + CCL=self.instr_CC.get_instr()) + d = self.int_avg_det + MC.set_sweep_function(s) + MC.set_sweep_points(p.sweep_points) + MC.set_sweep_function_2D(swf.anharmonicity_sweep(qubit=self, + amps=amps)) + MC.set_sweep_points_2D(anharmonicity) + MC.set_detector_function(d) + try: + if measure_3rd_state: + _title = 'fh_rabi_2D' + else: + _title = 'ef_rabi_2D' + MC.run(_title+label+self.msmt_suffix, mode='2D') + except: + print_exception() + mw_lutman.set_default_lutmap() + if analyze: + a = ma.TwoD_Analysis() + return a + + def measure_ef_rabi(self, + amps: list = np.linspace(0, .8, 18), + recovery_pulse: bool = True, + MC=None, label: str = '', + measure_3rd_state: bool = False, + disable_metadata: bool = False, + analyze=True, close_fig=True, + prepare_for_timedomain=True): + """ + Measures a rabi oscillation of the ef/12 transition. + + Modulation frequency of the "ef" pusles is controlled through the + `anharmonicity` parameter of the qubit object. + Hint: the expected pi-pulse amplitude of the ef/12 transition is ~1/2 + the pi-pulse amplitude of the ge/01 transition. 
+ """ + if MC is None: + MC = self.instr_MC.get_instr() + if prepare_for_timedomain: + self.prepare_for_timedomain() + + mw_lutman = self.instr_LutMan_MW.get_instr() + mw_lutman.load_ef_rabi_pulses_to_AWG_lookuptable(amps=amps) + + p = sqo.ef_rabi_seq( + self.cfg_qubit_nr(), + amps=amps, recovery_pulse=recovery_pulse, + measure_3rd_state = measure_3rd_state, + platf_cfg=self.cfg_openql_platform_fn(), + add_cal_points = True) + + s = swf.OpenQL_Sweep(openql_program=p, + parameter_name='Pulse amp', + unit='dac', + CCL=self.instr_CC.get_instr()) + d = self.int_avg_det + MC.set_sweep_function(s) + MC.set_sweep_points(p.sweep_points) + MC.set_detector_function(d) + mw_lutman = self.instr_LutMan_MW.get_instr() + try: + if measure_3rd_state: + _title = 'fh_rabi' + else: + _title = 'ef_rabi' + MC.run(_title+label+self.msmt_suffix, + disable_snapshot_metadata = disable_metadata) + mw_lutman.set_default_lutmap() + except: + print_exception() + mw_lutman.set_default_lutmap() + if analyze: + a2 = ma2.EFRabiAnalysis(close_figs=True, label=_title) + return a2 + + def calibrate_ef_rabi(self, + amps: list = np.linspace(-.8, .8, 18), + recovery_pulse: bool = True, + MC=None, label: str = '', + disable_metadata = False, + measure_3rd_state: bool = False, + analyze=True, close_fig=True, + prepare_for_timedomain=True, update=True): + """ + Calibrates the pi pulse of the ef/12 transition using + a rabi oscillation of the ef/12 transition. + + Modulation frequency of the "ef" pusles is controlled through the + `anharmonicity` parameter of the qubit object. + Hint: the expected pi-pulse amplitude of the ef/12 transition is ~1/2 + the pi-pulse amplitude of the ge/01 transition. 
+ """ + try: + a2 = self.measure_ef_rabi(amps = amps, + recovery_pulse = recovery_pulse, + MC = MC, label = label, + disable_metadata = disable_metadata, + measure_3rd_state = measure_3rd_state, + analyze = analyze, close_fig = close_fig, + prepare_for_timedomain = prepare_for_timedomain) + mw_lutman = self.instr_LutMan_MW.get_instr() + mw_lutman.set_default_lutmap() + except: + print_exception() + mw_lutman.set_default_lutmap() + if update: + ef_pi_amp = a2.proc_data_dict['ef_pi_amp'] + if ef_pi_amp > 1: + ef_pi_amp = 1 + if measure_3rd_state: + self.mw_fh_amp(ef_pi_amp) + else: + self.mw_ef_amp(ef_pi_amp) + + def measure_gst_1Q(self, + shots_per_meas: int, + maxL: int = 256, + MC=None, + recompile='as needed', + prepare_for_timedomain: bool = True): + """ + Performs single qubit Gate Set Tomography experiment of the StdXYI gateset. + + Requires optimal weights and a calibrated digitized readout. + + Args: + shots_per_meas (int): + maxL (int) : specifies the maximum germ length, + must be power of 2. 
+ lite_germs(bool) : if True uses "lite" germs + + + """ + if MC is None: + MC = self.instr_MC.get_instr() + + ######################################## + # Readout settings that have to be set # + ######################################## + + old_weight_type = self.ro_acq_weight_type() + old_digitized = self.ro_acq_digitized() + self.ro_acq_weight_type('optimal') + self.ro_acq_digitized(True) + + if prepare_for_timedomain: + self.prepare_for_timedomain() + else: + self.prepare_readout() + MC.soft_avg(1) + # set back the settings + self.ro_acq_weight_type(old_weight_type) + self.ro_acq_digitized(old_digitized) + + ######################################## + # Readout settings that have to be set # + ######################################## + + programs, exp_list_fn = pygsti_oql.single_qubit_gst( + q0=self.cfg_qubit_nr(), + maxL=maxL, + platf_cfg=self.cfg_openql_platform_fn(), + recompile=recompile) + + counter_param = ManualParameter('name_ctr', initial_value=0) + + s = swf.OpenQL_Sweep(openql_program=programs[0], + CCL=self.instr_CC.get_instr()) + d = self.int_log_det + + # poor man's GST contains 731 distinct gatestrings + + sweep_points = np.concatenate([p.sweep_points for p in programs]) + nr_of_meas = len(sweep_points) + print('nr_of_meas:', nr_of_meas) + + prepare_function_kwargs = { + 'counter_param': counter_param, + 'programs': programs, + 'CC': self.instr_CC.get_instr(), + 'detector': d} + # hacky as heck + d.prepare_function_kwargs = prepare_function_kwargs + d.prepare_function = oqh.load_range_of_oql_programs_varying_nr_shots + + shots = np.tile(sweep_points, shots_per_meas) + + MC.soft_avg(1) + MC.set_sweep_function(s) + MC.set_sweep_points(shots) + MC.set_detector_function(d) + MC.run('Single_qubit_GST_L{}_{}'.format(maxL, self.msmt_suffix), + exp_metadata={'bins': sweep_points, + 'gst_exp_list_filename': exp_list_fn}) + a = ma2.GST_SingleQubit_DataExtraction(label='Single_qubit_GST') + return a + + def measure_flux_arc_tracked_spectroscopy(self, 
dac_values=None, + polycoeffs=None, MC=None, + nested_MC=None, fluxChan=None): + """ + Creates a qubit DAC arc by fitting a polynomial function through qubit + frequencies obtained by spectroscopy. + + If polycoeffs is given, it will predict the first frequencies to + measure by from this estimate. If not, it will use a wider range in + spectroscopy for the first to values to ensure a peak in spectroscopy + is found. + + It will fit a 2nd degree polynomial each time qubit spectroscopy is + performed, and all measured qubit frequencies to construct a new + polynomial after each spectroscopy measurement. + + Args: + dac_values (array): + DAC values that are to be probed, which control the flux bias + + polycoeffs (array): + initial coefficients of a second order polynomial. Used + for predicting the qubit frequencies in the arc. + + MC (MeasurementControl): + main MC that varies the DAC current + + nested_MC (MeasurementControl): + MC that will measure spectroscopy for each current. + Is used inside the composite detector + + fluxChan (str): + Fluxchannel that is varied. 
Defaults to self.fl_dc_ch + """ + + if dac_values is None: + if self.fl_dc_I0() is None: + dac_values = np.linspace(-5e-3, 5e-3, 11) + else: + dac_values_1 = np.linspace(self.fl_dc_I0(), + self.fl_dc_I0() + 3e-3, + 11) + dac_values_2 = np.linspace(self.fl_dc_I0() + 3e-3, + self.fl_dc_I0() + 5e-3, + 6) + dac_values_ = np.linspace(self.fl_dc_I0(), + self.fl_dc_I0() - 5e-3, + 11) + + dac_values = np.concatenate([dac_values_1, dac_values_2]) + + if MC is None: + MC = self.instr_MC.get_instr() + + if nested_MC is None: + nested_MC = self.instr_nested_MC.get_instr() + + fluxcontrol = self.instr_FluxCtrl.get_instr() + if fluxChan is None: + dac_par = fluxcontrol.parameters[(self.fl_dc_ch())] + else: + dac_par = fluxcontrol.parameters[(fluxChan)] + + if polycoeffs is None: + polycoeffs = self.fl_dc_polycoeff() + + d = cdf.Tracked_Qubit_Spectroscopy(qubit=self, + nested_MC=nested_MC, + qubit_initial_frequency=self.freq_qubit(), + resonator_initial_frequency=self.freq_res(), + sweep_points=dac_values, + polycoeffs=polycoeffs) + + MC.set_sweep_function(dac_par) + MC.set_sweep_points(dac_values) + MC.set_detector_function(d) + MC.run(name='Tracked_Spectroscopy') + + def measure_LRU_experiment(self, + nr_shots_per_case: int = 2**13, + heralded_init: bool = False, + h_state: bool = False, + prepare_for_timedomain: bool = True, + reduced_prepare: bool = False, + analyze: bool = True, + extract_only: bool = False, + disable_metadata: bool = False): + ''' + Function to measure 2-state fraction removed by LRU pulse. + ''' + assert self.instr_LutMan_LRU() != None, 'LRU lutman is required.' + assert self.ro_acq_digitized() == False, 'Analog readout required' + assert 'IQ' in self.ro_acq_weight_type(), 'IQ readout is required!' 
+ MC = self.instr_MC.get_instr() + if prepare_for_timedomain: + mwl = self.instr_LutMan_MW.get_instr() + mwl.set_default_lutmap() + if reduced_prepare: + self._prep_LRU_pulses() + else: + self.prepare_for_timedomain() + # Set UHF number of shots + from math import ceil + shots_per_exp = 4 + if h_state: + shots_per_exp += 2 + nr_shots = (shots_per_exp)*nr_shots_per_case + if heralded_init: + nr_shots *= 2 + uhfqc_max_shots = 2**20 + if nr_shots < uhfqc_max_shots: + # all shots can be acquired in a single UHF run + shots_per_run = nr_shots + else: + # Number of UHF acquisition runs + nr_runs = ceil(nr_shots/uhfqc_max_shots) + shots_per_run = int((nr_shots/nr_runs)/shots_per_exp)*shots_per_exp + nr_shots = nr_runs*shots_per_run + p = sqo.LRU_experiment( + qubit_idx=self.cfg_qubit_nr(), + LRU_duration_ns=self.LRU_duration()*1e9, + h_state=h_state, + heralded_init=heralded_init, + platf_cfg=self.cfg_openql_platform_fn()) + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr(), + parameter_name='Shot', unit='#', + upload=True) + MC.soft_avg(1) + MC.set_sweep_function(s) + MC.set_sweep_points(np.arange(nr_shots)) + d = self.int_log_det + d.nr_shots = shots_per_run + MC.set_detector_function(d) + MC.live_plot_enabled(False) + label = 'LRU_experiment' + if h_state: + label += '_h_state' + MC.run(label+self.msmt_suffix, disable_snapshot_metadata=disable_metadata) + MC.live_plot_enabled(True) + # Analysis + if analyze: + a = ma2.lrua.LRU_experiment_Analysis(qubit=self.name, + h_state=h_state, + heralded_init=heralded_init, + extract_only=extract_only) + return a.qoi + + def measure_LRU_repeated_experiment(self, + rounds: int = 50, + injected_leak: float = 0.02, + leak_3rd_state: bool = False, + h_state: bool = False, + nr_shots_per_case: int = 2**13, + prepare_for_timedomain: bool = True, + analyze: bool = True, + disable_metadata: bool = False): + ''' + Function to measure 2-state fraction removed by LRU pulse. 
+ ''' + assert self.instr_LutMan_LRU() != None, 'LRU lutman is required.' + assert self.ro_acq_digitized() == False, 'Analog readout required' + assert 'IQ' in self.ro_acq_weight_type(), 'IQ readout is required!' + MC = self.instr_MC.get_instr() + # Convert leakage into angle + theta = 2*np.arcsin(np.sqrt(2*injected_leak))/np.pi*180 + if prepare_for_timedomain: + # Configure lutmap + mwl = self.instr_LutMan_MW.get_instr() + mwl.set_default_lutmap() + lutmap = mwl.LutMap() + if leak_3rd_state: + lutmap[10] = {'name': 'leak', 'theta': theta, 'phi': 0, 'type': 'fh'} + else: + lutmap[10] = {'name': 'leak', 'theta': theta, 'phi': 0, 'type': 'ef'} + mwl.LutMap(lutmap) + self.prepare_for_timedomain() + # Set UHF number of shots + from math import ceil + _cycle = 2*rounds+4 + if h_state: + _cycle += 1 + nr_shots = _cycle*nr_shots_per_case + # if heralded_init: + # nr_shots *= 2 + uhfqc_max_shots = 2**20 + if nr_shots < uhfqc_max_shots: + # all shots can be acquired in a single UHF run + shots_per_run = nr_shots + else: + # Number of UHF acquisition runs + nr_runs = ceil(nr_shots/uhfqc_max_shots) + shots_per_run = int((nr_shots/nr_runs)/_cycle)*_cycle + nr_shots = nr_runs*shots_per_run + # Compile sequence + p = sqo.LRU_repeated_experiment( + qubit_idx=self.cfg_qubit_nr(), + LRU_duration_ns=self.LRU_duration()*1e9, + leak_3rd_state=leak_3rd_state, + rounds=rounds, + heralded_init=False, + h_state=h_state, + platf_cfg=self.cfg_openql_platform_fn()) + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr(), + parameter_name='Shot', unit='#', + upload=True) + MC.soft_avg(1) + MC.set_sweep_function(s) + MC.set_sweep_points(np.arange(nr_shots)) + d = self.int_log_det + d.nr_shots = shots_per_run + MC.set_detector_function(d) + MC.live_plot_enabled(False) + label = 'Repeated_LRU_experiment' + MC.run(label+self.msmt_suffix, disable_snapshot_metadata=disable_metadata) + MC.live_plot_enabled(True) + # Analysis + if analyze: + a = 
ma2.lrua.Repeated_LRU_experiment_Analysis( + qubit=self.name, rounds=rounds, label='Repeated_LRU', + h_state=h_state) + + def measure_LRU_process_tomo(self, + nr_shots_per_case: int = 2**13, + prepare_for_timedomain: bool = True, + reduced_prepare: bool = False, + idle: bool = False, + disable_metadata: bool = False, + update: bool = True): + ''' + Function to measure 2-state fraction removed by LRU pulse. + Recommended number of averages is 2**15. One can go lower + depending on the required precision of the tomography. + ''' + assert self.instr_LutMan_LRU() != None, 'LRU lutman is required.' + assert self.ro_acq_digitized() == False, 'Analog readout required.' + assert 'IQ' in self.ro_acq_weight_type(), 'IQ readout is required.' + MC = self.instr_MC.get_instr() + if prepare_for_timedomain: + if reduced_prepare: + self._prep_LRU_pulses() + else: + self.prepare_for_timedomain() + # Experiment + nr_shots = (18+4)*nr_shots_per_case + p = sqo.LRU_process_tomograhpy( + qubit_idx = self.cfg_qubit_nr(), + LRU_duration_ns = self.LRU_duration()*1e9, + platf_cfg = self.cfg_openql_platform_fn(), + idle = idle) + s = swf.OpenQL_Sweep(openql_program=p, + CCL=self.instr_CC.get_instr(), + parameter_name='Shot', unit='#', + upload=True) + MC.soft_avg(1) + MC.set_sweep_function(s) + MC.set_sweep_points(np.arange(nr_shots)) + d = self.int_log_det + d.nr_shots = nr_shots + MC.set_detector_function(d) + MC.live_plot_enabled(False) + label = 'LRU_process_tomograhpy' + MC.run(label+self.msmt_suffix, disable_snapshot_metadata=disable_metadata) + MC.live_plot_enabled(True) + # Analysis + a = ma2.lrua.LRU_process_tomo_Analysis( + qubit=self.name, post_select_2state=False, + fit_3gauss=False) + if update: + angle = a.proc_data_dict['angle_p'] + mw_lm = self.instr_LutMan_MW.get_instr() + phase = mw_lm.LRU_virtual_q_ph_corr() + mw_lm.LRU_virtual_q_ph_corr(np.mod(phase-angle, 360)) + + def calibrate_LRU_frequency(self, + frequencies: list, + nr_shots_per_point: int = 2**10, + heralded_init: 
bool = False, + prepare_for_timedomain: bool = True, + update: bool = True, + disable_metadata: bool = False): + ''' + Sweeps the frequency of the LRU pulse while measuring the + leakage removal fration. Updates frequency of LRU choosing + the value that maximizes leakage removal fraction. + ''' + assert self.instr_LutMan_LRU() != None, 'LRU lutman is required.' + assert self.ro_acq_digitized() == False, 'Analog readout required.' + assert 'IQ' in self.ro_acq_weight_type(), 'IQ readout is required.' + nested_MC = self.instr_nested_MC.get_instr() + # Save initial LO frequency (to be reset in the end) + lru_lo = self.instr_LO_LRU.get_instr() + current_lo_freq = lru_lo.frequency() + # prepare for experiment + if prepare_for_timedomain: + self.prepare_for_timedomain() + # wrapper function to be used as detector + def wrapper(nr_shots_per_case, + heralded_init): + results = self.measure_LRU_experiment( + nr_shots_per_case=nr_shots_per_case, + heralded_init=heralded_init, + disable_metadata=True, + prepare_for_timedomain=False, + extract_only=True, + analyze=True) + out_dict = {'Population 0': results['pop_vec'][0], + 'Population 1': results['pop_vec'][1], + 'Population 2': results['pop_vec'][2]} + return out_dict + d = det.Function_Detector( + wrapper, + msmt_kw={'nr_shots_per_case': nr_shots_per_point, + 'heralded_init': heralded_init}, + result_keys=['Population 0', 'Population 1', 'Population 2'], + value_names=['Population 0', 'Population 1', 'Population 2'], + value_units=['fraction', 'fraction', 'fraction']) + nested_MC.set_detector_function(d) + sweep_function = swf.LRU_freq_sweep(self) + nested_MC.set_sweep_function(sweep_function) + nested_MC.set_sweep_points(frequencies) + try: + nested_MC.run(f'LRU_frequency_sweep_{self.name}', + disable_snapshot_metadata=disable_metadata) + a = ma2.lrua.LRU_frequency_sweep_Analysis( + label='LRU_frequency_sweep') + # reset LO frequency + lru_lo.frequency(current_lo_freq) + if update: + self.LRU_freq(a.qoi['f_optimal']) + 
return True + except: + # reset LO frequency + lru_lo.frequency(current_lo_freq) + print_exception() + return False + + def measure_flux_frequency_timedomain( + self, + amplitude: float = None, + times: list = np.arange(20e-9, 40e-9, 1/2.4e9), + wait_time_flux: int = 0, + disable_metadata: bool = False, + analyze: bool = True, + prepare_for_timedomain: bool = True, + ): + """ + Performs a cryoscope experiment to measure frequency + detuning for a given flux pulse amplitude. + Args: + Times: + Flux pulse durations used for cryoscope trace. + Amplitudes: + Amplitude of flux pulse used for cryoscope trace. + Note on analysis: The frequency is calculated based on + a FFT of the cryoscope trace. This means the frequency + resolution of this measurement will be given by the duration + of the cryoscope trace. To minimize the duration of this + measurement we obtain the center frequency of the FFT by + fitting it to a Lorentzian, which circumvents the frequency + sampling. + """ + assert self.ro_acq_weight_type()=='optimal' + MC = self.instr_MC.get_instr() + nested_MC = self.instr_nested_MC.get_instr() + fl_lutman = self.instr_LutMan_Flux.get_instr() + if amplitude: + fl_lutman.sq_amp(amplitude) + out_voltage = fl_lutman.sq_amp()*\ + fl_lutman.cfg_awg_channel_amplitude()*\ + fl_lutman.cfg_awg_channel_range()/2 + if prepare_for_timedomain: + self.prepare_for_timedomain() + fl_lutman.load_waveforms_onto_AWG_lookuptable() + p = mqo.Cryoscope( + qubit_idxs=[self.cfg_qubit_nr()], + flux_cw="sf_square", + wait_time_flux=wait_time_flux, + platf_cfg=self.cfg_openql_platform_fn(), + cc=self.instr_CC.get_instr().name, + double_projections=False, + ) + self.instr_CC.get_instr().eqasm_program(p.filename) + self.instr_CC.get_instr().start() + sw_function = swf.FLsweep(fl_lutman, fl_lutman.sq_length, + waveform_name="square") + MC.set_sweep_function(sw_function) + MC.set_sweep_points(times) + values_per_point = 2 + values_per_point_suffex = ["cos", "sin"] + d = self.get_int_avg_det( + 
values_per_point=values_per_point, + values_per_point_suffex=values_per_point_suffex, + single_int_avg=True, + always_prepare=False + ) + MC.set_detector_function(d) + label = f'Voltage_to_frequency_{out_voltage:.2f}V_{self.name}' + MC.run(label,disable_snapshot_metadata=disable_metadata) + # Run analysis + if analyze: + a = ma2.cv2.Time_frequency_analysis( + label='Voltage_to_frequency') + return a + + def calibrate_flux_arc( + self, + Times: list = np.arange(20e-9, 40e-9, 1/2.4e9), + Amplitudes: list = [-0.4, -0.35, -0.3, 0.3, 0.35, 0.4], + update: bool = True, + disable_metadata: bool = False, + prepare_for_timedomain: bool = True, + fix_zero_detuning: bool = True): + """ + Calibrates the polynomial coeficients for flux (voltage) + to frequency conversion. Does so by measuring cryoscope traces + at different amplitudes. + Args: + Times: + Flux pulse durations used to measure each + cryoscope trace. + Amplitudes: + DAC amplitudes of flux pulse used for each + cryoscope trace. + fix_zero_detuning: + Used to force the extracted flux arc detuning + be zero at zero amplitude. This should be enabled + when operating qubits at the sweetspot. 
+ """ + assert self.ro_acq_weight_type()=='optimal' + nested_MC = self.instr_nested_MC.get_instr() + fl_lutman = self.instr_LutMan_Flux.get_instr() + if prepare_for_timedomain: + self.prepare_for_timedomain() + fl_lutman.load_waveforms_onto_AWG_lookuptable() + sw_function = swf.FLsweep(fl_lutman, fl_lutman.sq_amp, + waveform_name="square") + nested_MC.set_sweep_function(sw_function) + nested_MC.set_sweep_points(Amplitudes) + def wrapper(): + a = self.measure_flux_frequency_timedomain( + times = Times, + disable_metadata=True, + prepare_for_timedomain=False) + return {'detuning':a.proc_data_dict['detuning']} + d = det.Function_Detector( + wrapper, + result_keys=['detuning'], + value_names=['detuning'], + value_units=['Hz']) + nested_MC.set_detector_function(d) + label = f'Voltage_frequency_arc_{self.name}' + nested_MC.run(label, disable_snapshot_metadata=disable_metadata) + a = ma2.cv2.Flux_arc_analysis(label='Voltage_frequency_arc', + channel_amp=fl_lutman.cfg_awg_channel_amplitude(), + channel_range=fl_lutman.cfg_awg_channel_range(), + fix_zero_detuning=fix_zero_detuning) + # Update detuning polynomial coeficients + if update: + p_coefs = a.qoi['P_coefs'] + fl_lutman.q_polycoeffs_freq_01_det(p_coefs) + return a + + def measure_flux_arc_dc( + self, + flux_bias_span: np.ndarray = np.linspace(-100e-6, 100e-6, 7), + flux_pulse_durations: np.ndarray = np.arange(40e-9, 60e-9, 1/2.4e9), + flux_pulse_amplitude: float = 0.4, + disable_metadata: bool = False, + prepare_for_timedomain: bool = True, + analyze: bool = True, + ): + """ + Measures the DC flux arc by calibrating the polynomial coefficients for flux (voltage) + to frequency conversion. This is achieved by measuring cryoscope traces at different + flux pulse amplitudes and durations. The method ensures accurate frequency to flux conversion + essential for quantum experiments involving flux-tunable qubits. + + Args: + flux_bias_span (np.ndarray): + Span of flux bias points around the original current setting. 
These points define + the range over which the flux arc is measured. Defaults to a linear space + between -100e-6 and 100e-6 with 7 points. + flux_pulse_durations (np.ndarray): + Array of flux pulse durations to be used for each cryoscope trace measurement. + Defines how long the flux pulse is applied during the measurement. Defaults to + an array ranging from 40e-9 to 60e-9 with steps inversely proportional to 2.4e9. + flux_pulse_amplitude (float): + The amplitude of the flux pulse applied during the cryoscope trace measurement. + Determines the strength of the flux pulse. Default value is 0.4. + disable_metadata (bool): + If set to True, disables the inclusion of metadata in the measurement snapshot. + Useful for repeated measurements where metadata redundancy is unnecessary. + Defaults to False. + prepare_for_timedomain (bool): + Determines whether the system should be prepared for time-domain measurements + before conducting the flux arc measurement. Defaults to True. + + Returns: + If analyze is True, returns an analysis object containing results of the flux arc + symmetry and intersection analysis. Otherwise, returns the raw measurement response + from the nested measurement control. + + Note: + This method assumes that the readout acquisition weight type is set to 'optimal'. + An assertion error is raised if the condition is not met. 
+ """ + assert self.ro_acq_weight_type() == 'optimal', "Expects transmon acquisition weight type to be 'optimal'" + + # Get instruments + nested_MC = self.instr_nested_MC.get_instr() + _flux_lutman = self.instr_LutMan_Flux.get_instr() + _flux_instrument = self.instr_FluxCtrl.get_instr() + _flux_parameter = _flux_instrument[f'FBL_{self.name}'] + + original_current: float = self.fl_dc_I0() + flux_bias_array: np.ndarray = flux_bias_span + original_current + + local_prepare = ManualParameter('local_prepare', initial_value=prepare_for_timedomain) + def wrapper(): + a_positive = self.measure_flux_frequency_timedomain( + amplitude=+flux_pulse_amplitude, + times=flux_pulse_durations, + disable_metadata=True, + prepare_for_timedomain=local_prepare(), + ) + local_prepare(False) # Turn off prepare for followup measurements + a_negative = self.measure_flux_frequency_timedomain( + amplitude=-flux_pulse_amplitude, + times=flux_pulse_durations, + disable_metadata=True, + prepare_for_timedomain=False, + ) + return { + 'detuning_positive': a_positive.proc_data_dict['detuning'], + 'detuning_negative': a_negative.proc_data_dict['detuning'], + 'amplitude': flux_pulse_amplitude, + } + d = det.Function_Detector( + wrapper, + result_keys=['detuning_positive', 'detuning_negative'], + value_names=['detuning', 'detuning'], + value_units=['Hz', 'Hz']) + + nested_MC.set_detector_function(d) + nested_MC.set_sweep_function(_flux_parameter) + nested_MC.set_sweep_points(np.atleast_1d(flux_bias_array)) + + label = f'voltage_to_frequency_dc_flux_arc_{self.name}' + try: + response = nested_MC.run(label, disable_snapshot_metadata=disable_metadata) + except Exception as e: + log.warning(e) + finally: + _flux_parameter(original_current) + if analyze: + a = ma2.cv2.FluxArcSymmetryIntersectionAnalysis( + initial_bias=original_current, + label=label, + ) + a.run_analysis() + return a + return response + + ########################################################################### + # Dep graph check 
functions + ########################################################################### + def check_qubit_spectroscopy(self, freqs=None, MC=None): + """ + Check the qubit frequency with spectroscopy of 15 points. + + Uses both the peak finder and the lorentzian fit to determine the + outcome of the check: + - Peak finder: if no peak is found, there is only noise. Will + definitely need recalibration. + - Fitting: if a peak is found, will do normal spectroscopy fitting + and determine deviation from what it thinks the qubit + frequency is + """ + if freqs is None: + freq_center = self.freq_qubit() + freq_span = 10e6 + freqs = np.linspace(freq_center - freq_span/2, + freq_center + freq_span/2, + 15) + self.measure_spectroscopy(MC=MC, freqs=freqs) + + label = 'spec' + a = ma.Qubit_Spectroscopy_Analysis(label=label, close_fig=True, + qb_name=self.name) + + freq_peak = a.peaks['peak'] + if freq_peak is None: + result = 1.0 + else: + freq = a.fitted_freq + result = np.abs(self.freq_qubit() - freq)/self.freq_qubit() + return result + + def check_rabi(self, MC=None, amps=None): + """ + Takes 5 equidistantly space points: 3 before channel amp, one at + channel amp and one after. 
Compares them with the expected Rabi curve + and returns a value in [0,1] to show the quality of the calibration + """ + if amps is None: + amps = np.linspace(0, 4/3*self.mw_channel_amp(), 5) + + amp = self.measure_rabi(MC=MC, amps=amps, analyze=False) + old_amp = self.mw_channel_amp() + return np.abs(amp-old_amp) + + def check_ramsey(self, MC=None, times=None, artificial_detuning=None): + + if artificial_detuning is None: + artificial_detuning = 0.1e6 + + if times is None: + times = np.linspace(0, 0.5/artificial_detuning, 6) + + a = self.measure_ramsey(times=times, MC=MC, + artificial_detuning=artificial_detuning) + freq = a['frequency'] + check_result = (freq-self.freq_qubit())/freq + return check_result + + def create_ssro_detector(self, + calibrate_optimal_weights: bool = False, + prepare_function=None, + prepare_function_kwargs: dict = None, + ssro_kwargs: dict = None): + """ + Wraps measure_ssro using the Function Detector. + + Args: + calibrate_optimal_weights + """ + if ssro_kwargs is None: + ssro_kwargs = { + 'nr_shots_per_case': 8192, + 'analyze': True, + 'prepare': False, + 'disable_metadata': True + } + + if not calibrate_optimal_weights: + d = det.Function_Detector( + self.measure_ssro, + msmt_kw=ssro_kwargs, + result_keys=['SNR', 'F_d', 'F_a'], + prepare_function=prepare_function, + prepare_function_kwargs=prepare_function_kwargs, + always_prepare=True) + else: + d = det.Function_Detector( + self.calibrate_optimal_weights, + msmt_kw=ssro_kwargs, + result_keys=['SNR', 'F_d', 'F_a'], + prepare_function=prepare_function, + prepare_function_kwargs=prepare_function_kwargs, + always_prepare=True) + return d + + # functions for quantum efficiency measurements and crossdephasing measurements + + def measure_msmt_induced_dephasing_sweeping_amps(self, amps_rel=None, + nested_MC=None, cross_target_qubits=None, + multi_qubit_platf_cfg=None, analyze=False, + verbose: bool = True, sequence='ramsey', + target_qubit_excited=False, + extra_echo=False): + waveform_name 
= 'up_down_down_final' + + if nested_MC is None: + nested_MC = self.instr_nested_MC.get_instr() + + if cross_target_qubits is None or (len(cross_target_qubits) == 1 and self.name == cross_target_qubits[0]): + cross_target_qubits = None + + if cross_target_qubits is None: + # Only measure on a single Qubit + cfg_qubit_nrs = [self.cfg_qubit_nr()] + optimization_M_amps = [self.ro_pulse_amp()] + optimization_M_amp_down0s = [self.ro_pulse_down_amp0()] + optimization_M_amp_down1s = [self.ro_pulse_down_amp1()] + readout_pulse_length = self.ro_pulse_length() + readout_pulse_length += self.ro_pulse_down_length0() + readout_pulse_length += self.ro_pulse_down_length1() + amps_rel = np.linspace( + 0, 0.5, 11) if amps_rel is None else amps_rel + else: + cfg_qubit_nrs = [] + optimization_M_amps = [] + optimization_M_amp_down0s = [] + optimization_M_amp_down1s = [] + readout_pulse_lengths = [] + for cross_target_qubit in cross_target_qubits: + cfg_qubit_nrs.append(cross_target_qubit.cfg_qubit_nr()) + optimization_M_amps.append(cross_target_qubit.ro_pulse_amp()) + optimization_M_amp_down0s.append( + cross_target_qubit.ro_pulse_down_amp0()) + optimization_M_amp_down1s.append( + cross_target_qubit.ro_pulse_down_amp1()) + ro_len = cross_target_qubit.ro_pulse_length() + ro_len += cross_target_qubit.ro_pulse_down_length0() + ro_len += cross_target_qubit.ro_pulse_down_length1() + readout_pulse_lengths.append(ro_len) + readout_pulse_length = np.max(readout_pulse_lengths) + + RO_lutman = self.instr_LutMan_RO.get_instr() + if sequence == 'ramsey': + RO_lutman.set('M_final_delay_R{}'.format( + self.cfg_qubit_nr()), 200e-9) + elif sequence == 'echo': + RO_lutman.set('M_final_delay_R{}'.format(self.cfg_qubit_nr()), + 200e-9) # +readout_pulse_length) + else: + raise NotImplementedError('dephasing sequence not recognized') + + old_waveform_name = self.ro_pulse_type() + self.ro_pulse_type(waveform_name) + RO_lutman.set('M_final_amp_R{}'.format(self.cfg_qubit_nr()), + self.ro_pulse_amp()) + 
old_delay = self.ro_acq_delay() + d = RO_lutman.get('M_final_delay_R{}'.format(self.cfg_qubit_nr())) + + self.ro_acq_delay(old_delay + readout_pulse_length + d) + + # self.ro_acq_integration_length(readout_pulse_length+100e-9) + self.ro_acq_weight_type('SSB') + self.prepare_for_timedomain() + old_ro_prepare_state = self.cfg_prepare_ro_awg() + self.cfg_prepare_ro_awg(False) + + sweep_function = swf.lutman_par_depletion_pulse_global_scaling( + LutMan=RO_lutman, + resonator_numbers=cfg_qubit_nrs, + optimization_M_amps=optimization_M_amps, + optimization_M_amp_down0s=optimization_M_amp_down0s, + optimization_M_amp_down1s=optimization_M_amp_down1s, + upload=True + ) + d = det.Function_Detector( + self.measure_msmt_induced_dephasing, + msmt_kw={ + 'cross_target_qubits': cross_target_qubits, + 'multi_qubit_platf_cfg': multi_qubit_platf_cfg, + 'analyze': True, + 'sequence': sequence, + 'target_qubit_excited': target_qubit_excited, + 'extra_echo': extra_echo + }, + result_keys=['coherence', 'phase'] + ) + + nested_MC.set_sweep_function(sweep_function) + nested_MC.set_sweep_points(amps_rel) + nested_MC.set_detector_function(d) + + label = 'ro_amp_sweep_dephasing' + self.msmt_suffix + nested_MC.run(label) + + # Reset qubit objects parameters tp previous settings + self.ro_pulse_type(old_waveform_name) + self.cfg_prepare_ro_awg(old_ro_prepare_state) + self.ro_acq_delay(old_delay) + + if analyze: + res = ma.MeasurementAnalysis( + label=label, plot_all=False, auto=True) + return res + + def measure_SNR_sweeping_amps(self, amps_rel, nr_shots=2*4094, + nested_MC=None, analyze=True): + """ + Measures SNR and readout fidelities as a function of the readout pulse + amplitude. Resonator depletion pulses are automatically scaled. + Weights are not optimized - routine is intended to be used with SSB weights. + + Args: + amps_rel (array): + readout pulse amplitudes to loop over. Value of 1 indicates + amplitude currently specified in the qubit object. 
+ + nr_shots (int): + total number of measurements in qubit ground and excited state + """ + + if nested_MC is None: + nested_MC = self.instr_nested_MC.get_instr() + self.prepare_for_timedomain() + RO_lutman = self.instr_LutMan_RO.get_instr() + old_ro_prepare_state = self.cfg_prepare_ro_awg() + self.cfg_prepare_ro_awg(False) + + sweep_function = swf.lutman_par_depletion_pulse_global_scaling( + LutMan=RO_lutman, + resonator_numbers=[self.cfg_qubit_nr()], + optimization_M_amps=[self.ro_pulse_amp()], + optimization_M_amp_down0s=[self.ro_pulse_down_amp0()], + optimization_M_amp_down1s=[self.ro_pulse_down_amp1()], + upload=True + ) + d = det.Function_Detector( + self.measure_ssro, + msmt_kw={ + 'nr_shots': nr_shots, + 'analyze': True, 'SNR_detector': True, + 'cal_residual_excitation': False, + }, + result_keys=['SNR', 'F_d', 'F_a'] + ) + + nested_MC.set_sweep_function(sweep_function) + nested_MC.set_sweep_points(amps_rel) + nested_MC.set_detector_function(d) + label = 'ro_amp_sweep_SNR' + self.msmt_suffix + nested_MC.run(label) + + self.cfg_prepare_ro_awg(old_ro_prepare_state) + + if analyze: + ma.MeasurementAnalysis(label=label, plot_all=False, auto=True) + + def measure_quantum_efficiency(self, amps_rel=None, nr_shots=2*4094, + analyze=True, verbose=True, + dephasing_sequence='ramsey'): + # requires the cc light to have the readout time configured equal + # to the measurement and depletion time + 60 ns buffer + # it requires an optimized depletion pulse + amps_rel = np.linspace(0, 0.5, 11) if amps_rel is None else amps_rel + self.cfg_prepare_ro_awg(True) + + start_time = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + + self.measure_msmt_induced_dephasing_sweeping_amps( + amps_rel=amps_rel, + analyze=False, + sequence=dephasing_sequence) + readout_pulse_length = self.ro_pulse_length() + readout_pulse_length += self.ro_pulse_down_length0() + readout_pulse_length += self.ro_pulse_down_length1() + # self.ro_acq_integration_length(readout_pulse_length+0e-9) + + 
self.ro_pulse_type('up_down_down') + # setting acquisition weights to optimal + self.ro_acq_weight_type('optimal') + + # calibrate residual excitation and relaxation at high power + self.measure_ssro(cal_residual_excitation=True, SNR_detector=True, + nr_shots=nr_shots, update_threshold=False) + self.measure_SNR_sweeping_amps(amps_rel=amps_rel, analyze=False) + + end_time = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + + # set the pulse back to optimal depletion + self.ro_pulse_type('up_down_down') + + if analyze: + options_dict = { + 'individual_plots': True, + 'verbose': verbose, + } + qea = ma2.QuantumEfficiencyAnalysis( + t_start=start_time, + t_stop=end_time, + use_sweeps=True, + options_dict=options_dict, + label_dephasing='_ro_amp_sweep_dephasing'+self.msmt_suffix, + label_ssro='_ro_amp_sweep_SNR'+self.msmt_suffix) + + # qea.run_analysis() + eta = qea.fit_dicts['eta'] + u_eta = qea.fit_dicts['u_eta'] + + return {'eta': eta, 'u_eta': u_eta, + 't_start': start_time, 't_stop': end_time} + else: + return {} + + def calc_current_to_freq(self, curr: float): + """ + Converts DC current to requency in Hz for a qubit + + Args: + curr (float): + current in A + """ + polycoeffs = self.fl_dc_polycoeff() + + return np.polyval(polycoeffs, curr) + + def calc_freq_to_current(self, freq, kind='root_parabola', **kw): + """ + Find the amplitude that corresponds to a given frequency, by + numerically inverting the fit. + + Args: + freq (float, array): + The frequency or set of frequencies. + + **kw : get passed on to methods that implement the different "kind" + of calculations. 
+ """ + + return ct.freq_to_amp_root_parabola(freq=freq, + poly_coeffs=self.fl_dc_polycoeff(), + **kw) + + def set_target_freqency(self,target_frequency = 6e9, + sweetspot_current = None, + sweetspot_frequency = None, + phi0 =30e-3, + Ec=270e6, + span_res=30e6, + span_q=0.5e9, + step_q = 1e6, + step_res= 0.5e6, + I_correct= 0.1e-3, + accuracy= 0.1e9, + fine_tuning= False): + """ + Fluxing a qubit to a targeted frequency based on an estimation using the fluxarc. + + Args: target_frequency (float) + frequency at which you want to bias the qubit in Hz + + sweetspot_current (float) + current at sweetspot frequency in A + sweetspot_frequency (float) + qubit frequency at sweetspot in Hz + phi0 (float) + value of phi0 (length of fluxarc) in A + Ec (float) + Value of Ec in Hz (estimated as 270 MHz) + """ + + # if target_frequency is None: + # if self.name + if sweetspot_current is None: + sweetspot_current = self.fl_dc_I0() + if sweetspot_frequency is None: + sweetspot_frequency = self.freq_max() + I=phi0/np.pi*np.arccos(((target_frequency+Ec)/(sweetspot_frequency+Ec))**2)+sweetspot_current + print('Baised current at target is {}'.format(I)) + fluxcurrent = self.instr_FluxCtrl.get_instr() + fluxcurrent.set(self.fl_dc_ch(),I) + center_res = self.freq_res() + center_q = target_frequency + if fine_tuning is False: + res =self.find_resonator_frequency(freqs=np.arange(-span_res/2,span_res/2,step_res)+center_res,update=True) + if res == self.freq_res(): + print(self.freq_res()) + else: + res2=self.find_resonator_frequency(freqs=np.arange(-span_res,span_res,step_res)+center_res,update=True) + if res2== self.freq_res(): + print(self.freqs(res)) + else: + raise ValueError('Resonator {} cannot be found at target frequency'.format(self.name)) + f = self.find_frequency(freqs=np.arange(-span_q/2,span_q/2,step_q)+center_q,update=True) + if f : + print('Qubit frequency at target is {}'.format(self.freq_qubit())) + else: + f2 = 
self.find_frequency(freqs=np.arange(-span_q,span_q,step_q)+center_q) + if f2==True: + print('Qubit frequency at target is {}'.format(self.freq_qubit())) + else: + raise ValueError('Qubit {} cannot be found at target frequency'.format(self.name)) + else: + while abs(self.freq_qubit() - target_frequency) > accuracy: + if self.freq_qubit() - target_frequency > 0: + I = I + I_correct + else: + I = I - I_correct + print(I) + fluxcurrent.set(self.fl_dc_ch(), I) + self.find_resonator_frequency(freqs=np.arange(-span_res/2,span_res/2,step_res)+center_res) + self.find_frequency(freqs=np.arange(-span_q/5,span_q/5,step_q)+center_q) + return True diff --git a/pycqed/instrument_drivers/meta_instrument/qubit_objects/qubit_object.py b/pycqed/instrument_drivers/meta_instrument/qubit_objects/qubit_object.py index 969f717305..f3f8c8cc0f 100644 --- a/pycqed/instrument_drivers/meta_instrument/qubit_objects/qubit_object.py +++ b/pycqed/instrument_drivers/meta_instrument/qubit_objects/qubit_object.py @@ -7,9 +7,9 @@ from qcodes.instrument.base import Instrument from qcodes.utils import validators as vals +from pycqed.measurement import detector_functions as det from qcodes.instrument.parameter import ManualParameter -from pycqed.measurement import detector_functions as det from pycqed.utilities.general import gen_sweep_pts from pycqed.analysis import measurement_analysis as ma from pycqed.analysis_v2 import measurement_analysis as ma2 @@ -1198,23 +1198,23 @@ def find_resonator_frequency( # Normal functions: ########################################################################## - def find_frequency( - self, - method='spectroscopy', - spec_mode='pulsed_marked', - steps=[1, 3, 10, 30, 100], - artificial_periods=4, - freqs=None, - f_span=100e6, - use_max=False, - f_step=1e6, - verbose=True, - update=True, - close_fig=True, - MC=None, - label='' - ): - # USED_BY: device_dependency_graphs.py + # FIXME: overridden in unused class Transmon + def find_frequency(self, + method='spectroscopy', 
+ spec_mode='pulsed_marked', + steps=[1, 3, 10, 30, 100], + artificial_periods=4, + freqs=None, + f_span=100e6, + use_max=False, + f_step=1e6, + verbose=True, + update=True, + close_fig=True, + MC=None, + label = '', + disable_metadata=False + ): """ Finds the qubit frequency using either the spectroscopy or the Ramsey method. @@ -1304,7 +1304,8 @@ def find_frequency( return self.calibrate_frequency_ramsey( steps=steps, artificial_periods=artificial_periods, verbose=verbose, update=update, - close_fig=close_fig) + close_fig=close_fig, + disable_metadata=disable_metadata) return analysis_spec.fitted_freq def calibrate_spec_pow( @@ -1387,16 +1388,16 @@ def calibrate_motzoi( self.motzoi(opt_motzoi) return opt_motzoi - def calibrate_frequency_ramsey( - self, - steps=[1, 3, 10, 30, 100, 300, 1000], - artificial_periods=2.5, - stepsize: float = 20e-9, - verbose: bool = True, - update: bool = True, - close_fig: bool = True, - test_beating: bool = True - ): + # FIXME: overridden in unused class Transmon + def calibrate_frequency_ramsey(self, + steps=[1, 1, 3, 10, 30, 100, 300, 1000], + artificial_periods = 2.5, + stepsize:float =20e-9, + verbose: bool=True, update: bool=True, + close_fig: bool=True, + test_beating: bool=True, + disable_metadata=False + ): # USED_BY: inspire_dependency_graph.py, # USED_BY: device_dependency_graphs_v2.py, # USED_BY: device_dependency_graphs.py @@ -1427,19 +1428,27 @@ def calibrate_frequency_ramsey( freq_qubit=cur_freq, label='_{}pulse_sep'.format(n), analyze=False, - prepare_for_timedomain=True if 0 == i else False) + prepare_for_timedomain=True if 0 == i else False, + disable_metadata=disable_metadata) a = ma.Ramsey_Analysis(auto=True, close_fig=close_fig, freq_qubit=cur_freq, artificial_detuning=artificial_detuning, close_file=False) - if test_beating and a.fit_res.chisqr > 0.4: + print(a.fit_res.chisqr) + if test_beating and a.fit_res.chisqr > 0.1: logging.warning('Found double frequency in Ramsey: large ' 'deviation found in single 
frequency fit.' 'Returning True to continue automation. Retry ' 'with test_beating=False to ignore.') - - return True - fitted_freq = a.fit_res.params['frequency'].value + # If Double beating is found in Ramsey, the chosen frequency + # will be set to the average of the two frequencies. + b = ma.DoubleFrequency() + fitted_freq = (b.fit_res.params['freq_1'].value+\ + b.fit_res.params['freq_2'].value)/2 + b.T2_star = {'T2_star': (b.fit_res.params['tau_1'].value+\ + b.fit_res.params['tau_2'].value)/2} + else: + fitted_freq = a.fit_res.params['frequency'].value measured_detuning = fitted_freq-artificial_detuning cur_freq = a.qubit_frequency @@ -1464,6 +1473,7 @@ def calibrate_frequency_ramsey( self.freq_qubit(cur_freq) return cur_freq + # FIXME: overridden in unused class Transmon def calculate_frequency(self, calc_method=None, I_per_phi0=None, I=None): # USED_BY: find_frequency() """ diff --git a/pycqed/instrument_drivers/physical_instruments/QuTech/CC.py b/pycqed/instrument_drivers/physical_instruments/QuTech/CC.py index f5c3efb57c..18099adfd1 100644 --- a/pycqed/instrument_drivers/physical_instruments/QuTech/CC.py +++ b/pycqed/instrument_drivers/physical_instruments/QuTech/CC.py @@ -18,9 +18,10 @@ from .CCCore import CCCore from pycqed.instrument_drivers.library.Transport import Transport import pycqed.instrument_drivers.library.DIO as DIO - +from importlib import reload from qcodes.utils import validators as vals from qcodes import Instrument +# reload(CCCore) log = logging.getLogger(__name__) diff --git a/pycqed/instrument_drivers/physical_instruments/QuTech/CCCore.py b/pycqed/instrument_drivers/physical_instruments/QuTech/CCCore.py index 0fbfa6ca2b..dd51726917 100644 --- a/pycqed/instrument_drivers/physical_instruments/QuTech/CCCore.py +++ b/pycqed/instrument_drivers/physical_instruments/QuTech/CCCore.py @@ -15,6 +15,7 @@ import logging import sys +import re from pycqed.instrument_drivers.library.SCPIBase import SCPIBase from 
pycqed.instrument_drivers.library.Transport import Transport @@ -109,10 +110,16 @@ def assemble_and_start(self, program_string: str) -> None: # CC SCPI protocol wrapper functions ########################################################################## - def sequence_program_assemble(self, program_string: str) -> None: + def sequence_program_assemble(self, program_string: str, strip_comments: bool = True) -> None: """ upload sequence program string """ + # optionally strip comments to reduce size and limit transfer and assembly time + if(strip_comments): + print(f'original size of .vq1asm {len(program_string)}') + program_string = re.sub(r"#.*", "", program_string) + print(f'size of .vq1asm {len(program_string)} after stripping comments') + # check size, because overrunning gives irrecoverable errors. FIXME: move to Transport if len(program_string) > self._MAX_PROG_STR_LEN: raise RuntimeError(f'source program size {len(program_string)} exceeds maximum of {self._MAX_PROG_STR_LEN}') @@ -293,7 +300,7 @@ def get_status_questionable_instrument_idetail_diocal_enable(self, ccio: int) -> # constants ########################################################################## - _MAX_PROG_STR_LEN = 40*1024*1024-1024 # size of CC input buffer, minus some room for command. FIXME: get from instrument + _MAX_PROG_STR_LEN = 150*1024*1024-1024 # size of CC input buffer, minus some room for command. FIXME: get from instrument # trace units TRACE_CCIO_DEV_IN = 0 diff --git a/pycqed/instrument_drivers/physical_instruments/Transport.py b/pycqed/instrument_drivers/physical_instruments/Transport.py index 89f70e6c9f..3b9dbc71b8 100644 --- a/pycqed/instrument_drivers/physical_instruments/Transport.py +++ b/pycqed/instrument_drivers/physical_instruments/Transport.py @@ -44,7 +44,7 @@ class IPTransport(Transport): def __init__(self, host: str, port: int = 5025, - timeout = 30.0, + timeout = 60.0, snd_buf_size: int = 512 * 1024) -> None: """ establish connection, e.g. 
IPTransport('192.168.0.16', 4000) diff --git a/pycqed/instrument_drivers/physical_instruments/ZurichInstruments/UHFQuantumController.py b/pycqed/instrument_drivers/physical_instruments/ZurichInstruments/UHFQuantumController.py index 498d0dc50b..de77c96a3b 100644 --- a/pycqed/instrument_drivers/physical_instruments/ZurichInstruments/UHFQuantumController.py +++ b/pycqed/instrument_drivers/physical_instruments/ZurichInstruments/UHFQuantumController.py @@ -61,6 +61,7 @@ import inspect import numpy as np from typing import Tuple,List +from importlib import reload import pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument as zibase import pycqed.instrument_drivers.physical_instruments.ZurichInstruments.UHFQA_core as uhf @@ -70,7 +71,7 @@ from qcodes.utils.helpers import full_class log = logging.getLogger(__name__) - +reload(uhf) ########################################################################## # Exceptions ########################################################################## @@ -141,11 +142,9 @@ def __init__(self, # Holds the number of configured cases self._cases = None - super().__init__(name=name, device=device, interface=interface, address=address, server=server, port=port, nr_integration_channels=nr_integration_channels, **kw) - t1 = time.time() log.info(f'{self.devname}: Initialized UHFQC in {t1 - t0:.3f}s') @@ -1020,7 +1019,6 @@ def _find_valid_delays(self, awg_nr, mask_value: int): configured bits. 
In addition, it compares the recorded DIO codewords to an expected sequence to make sure that no codewords are sampled incorrectly.""" log.debug("{self.devname}: Finding valid delays") - vld_mask = 1 << self.geti('awgs/{}/dio/valid/index'.format(awg_nr)) vld_polarity = self.geti('awgs/{}/dio/valid/polarity'.format(awg_nr)) strb_mask = (1 << self.geti('awgs/{}/dio/strobe/index'.format(awg_nr))) @@ -1041,7 +1039,8 @@ def _find_valid_delays(self, awg_nr, mask_value: int): self.setd('raw/dios/0/delay', delay) # in 1/300 MHz = 3.33 ns steps try: # LabOne 22.02 and higher: clear the sticky timing error detection bits - self.setd('raw/dios/0/error/timingclear', 0xffffffff) + # self.setd('raw/dios/0/error/timingclear', 0xffffffff) + pass except RuntimeError: # no timingclear node available pass @@ -1050,11 +1049,13 @@ def _find_valid_delays(self, awg_nr, mask_value: int): for awg in [0]: try: # LabOne 22.02 and higher: read out timing errors that accumulated since the last call to "timingclear" - error_timing = self.geti('raw/dios/0/error/timingsticky') + # error_timing = self.geti('raw/dios/0/error/timingsticky') + pass except RuntimeError: - error_timing = self.geti('raw/dios/0/error/timing') - if error_timing & combined_mask != 0: - valid_sequence = False + # error_timing = self.geti('raw/dios/0/error/timing') + pass + # if error_timing & combined_mask != 0: + # valid_sequence = False if valid_sequence: valid_delays.append(delay) diff --git a/pycqed/instrument_drivers/physical_instruments/ZurichInstruments/ZI_HDAWG8.py b/pycqed/instrument_drivers/physical_instruments/ZurichInstruments/ZI_HDAWG8.py index f162a2c696..ad87fd100a 100644 --- a/pycqed/instrument_drivers/physical_instruments/ZurichInstruments/ZI_HDAWG8.py +++ b/pycqed/instrument_drivers/physical_instruments/ZurichInstruments/ZI_HDAWG8.py @@ -96,7 +96,9 @@ from qcodes.utils import validators from qcodes.instrument.parameter import ManualParameter from qcodes.utils.helpers import full_class +from importlib 
import reload +reload(zibase) log = logging.getLogger(__name__) ########################################################################## @@ -330,6 +332,9 @@ def upload_commandtable(self, commandtable: Union[str, dict], awg_nr: int): """ if isinstance(commandtable, dict): commandtable = json.dumps(commandtable, sort_keys=True, indent=2) + # inserting here the 'hack' discussed with ZI. Hany and Leo DC. 2022/06/15 + # internal ZI ticket: HULK-788 + commandtable = commandtable+" "*10000 # validate json (without schema) try: @@ -688,3 +693,22 @@ def calibrate_dio_protocol(self, dio_mask: int, expected_sequence: List, port: i def calibrate_CC_dio_protocol(self, CC, verbose=False) -> None: raise DeprecationWarning("calibrate_CC_dio_protocol is deprecated, use instrument_drivers.library.DIO.calibrate") + + # region Class Methods + @classmethod + def from_other_instance(cls, instance: ZI_HDAWG8) -> 'ZI_HDAWG8': + """:return: Class-method constructor based on (other) instrument instance.""" + name: str = instance.name + device: str = instance.devname + codeword_protocol: str = instance.cfg_codeword_protocol() + dios_0_interface: int = instance.get('dios_0_interface') + # Close current instance + instance.stop() + instance.close() + # Connect new instance + result_instance = ZI_HDAWG8(name=name, device=device) + result_instance.cfg_codeword_protocol(codeword_protocol) + result_instance.set('dios_0_interface', dios_0_interface) + result_instance.clear_errors() + return result_instance + # endregion diff --git a/pycqed/instrument_drivers/physical_instruments/ZurichInstruments/ZI_HDAWG8_LongCryoscope.py b/pycqed/instrument_drivers/physical_instruments/ZurichInstruments/ZI_HDAWG8_LongCryoscope.py new file mode 100644 index 0000000000..7dc1c8b5cc --- /dev/null +++ b/pycqed/instrument_drivers/physical_instruments/ZurichInstruments/ZI_HDAWG8_LongCryoscope.py @@ -0,0 +1,115 @@ +# ------------------------------------------- +# Module containing subclass of ZI_HDAWG8. 
+# Subclass overwrites _get_waveform_table and _codeword_table_preamble +# ------------------------------------------- +from dataclasses import dataclass, field +from typing import List, Dict, Tuple +import numpy as np +from pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_HDAWG8 import ZI_HDAWG8 + + +class ZI_HDAWG8_LongCryoscope(ZI_HDAWG8): + """ + Behaviour class, driver for ZurichInstruments HDAWG8 instrument. + Intended for Long-Cryoscope measurement where a single flux-pulse waveform is uploaded. + NOTE: Only intended to use for Flux-HDAWG implementations. + """ + + # region Class Properties + @property + def long_cryoscope_channel(self) -> int: + return self._long_cryocopse_channel + + @long_cryoscope_channel.setter + def long_cryoscope_channel(self, value: int) -> None: + assert 0 <= value <= 7, f"Assumes channel value between 0 and 7, instead: {value}." + self._long_cryocopse_channel = value + + @property + def is_odd_channel(self) -> bool: + """:return: Whether (focus) long-cryoscope channel is odd.""" + return self.long_cryoscope_channel % 2 + # endregion + + # region Class Constructor + def __init__(self, name: str, device: str, interface: str = '1GbE', server: str = 'localhost', port=8004, num_codewords: int = 64, **kw): + super().__init__(name=name, device=device, interface=interface, server=server, port=port, num_codewords=num_codewords, **kw) + self._long_cryoscope_channel: int # 0-indexed, 0-7 + # endregion + + # region Class Methods + def _get_waveform_table(self, awg_nr: int) -> list: + """ + Returns the waveform table. + + The waveform table determines the mapping of waveforms to DIO codewords. + The index of the table corresponds to the DIO codeword. + The entry is a tuple of waveform names. + + Example: + ["wave_ch7_cw000", "wave_ch8_cw000", + "wave_ch7_cw001", "wave_ch8_cw001", + "wave_ch7_cw002", "wave_ch8_cw002"] + + The waveform table generated depends on the awg_nr and the codeword + protocol. 
+ """ + assert self.cfg_codeword_protocol() == 'flux', f"Assumes this HDAWG is used as flux instrument, instead: {self.cfg_codeword_protocol()}." + ch: int = awg_nr * 2 + wf_table = [] + cw_r: int = 1 + cw_l: int = 0 + + is_odd_channel: bool = self.is_odd_channel + if is_odd_channel: + cw_r = 0 + cw_l = 1 + wf_table.append( + (zibase.gen_waveform_name(ch, cw_l), + zibase.gen_waveform_name(ch + 1, cw_r)) + ) + return wf_table + + def _codeword_table_preamble(self, awg_nr: int): + """ + Defines a snippet of code to use in the beginning of an AWG program in order to define the waveforms. + The generated code depends on the instrument type. For the HDAWG instruments, we use the seWaveDIO + function. + """ + program = '' + + wf_table = self._get_waveform_table(awg_nr=awg_nr) + is_odd_channel: bool = self.is_odd_channel + if is_odd_channel: + dio_cws = [0, 1] + else: + dio_cws = [0, 8] + + # Assuming wf_table looks like this: [('wave_ch7_cw000', 'wave_ch8_cw000'), ('wave_ch7_cw000', 'wave_ch8_cw001')] + for dio_cw, (wf_l, wf_r) in zip(dio_cws, wf_table): + csvname_l = self.devname + '_' + wf_l + csvname_r = self.devname + '_' + wf_r + + if self.cfg_sideband_mode() == 'static' or self.cfg_codeword_protocol() == 'flux': + program += f'setWaveDIO({dio_cw}, \"{csvname_l}\", \"{csvname_r}\");\n' + else: + raise Exception(f"Unknown modulation type '{self.cfg_sideband_mode()}' and codeword protocol '{self.cfg_codeword_protocol()}'") + return program + + @classmethod + def from_other_instance(cls, instance: ZI_HDAWG8) -> 'ZI_HDAWG8_LongCryoscope': + """:return: Class-method constructor based on (other) instrument instance.""" + name: str = instance.name + device: str = instance.devname + codeword_protocol: str = instance.cfg_codeword_protocol() + dios_0_interface: int = instance.get('dios_0_interface') + # Close current instance + instance.stop() + instance.close() + # Connect new instance + result_instance = ZI_HDAWG8_LongCryoscope(name=name, device=device) + 
result_instance.cfg_codeword_protocol(codeword_protocol) + result_instance.set('dios_0_interface', dios_0_interface) + result_instance.clear_errors() + return result_instance + # endregion diff --git a/pycqed/instrument_drivers/physical_instruments/ZurichInstruments/ZI_HDAWG8_PRNG.py b/pycqed/instrument_drivers/physical_instruments/ZurichInstruments/ZI_HDAWG8_PRNG.py new file mode 100644 index 0000000000..26040bf7c6 --- /dev/null +++ b/pycqed/instrument_drivers/physical_instruments/ZurichInstruments/ZI_HDAWG8_PRNG.py @@ -0,0 +1,161 @@ +# ------------------------------------------- +# Module containing subclass of ZI_HDAWG8. +# Subclass overwrites upload-codeword-program +# Adds functionality to: +# - read dio 32-bit number +# - mask relevant part for AWG core +# - compare to pseudo random number generator (PRNG) +# - run codeword based on this (random) outcome +# ------------------------------------------- +from dataclasses import dataclass, field +from typing import List, Dict, Tuple +import numpy as np +from pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_HDAWG8 import ZI_HDAWG8 + + +@dataclass(frozen=True) +class StochasticTrigger: + """ + Data class, containing information about a stochastic trigger. + """ + awg_core_index: int + """HDAWG core index (0, 1, 2 or 3).""" + trigger_probability: float + """Probability between 0 and 1.""" + default_codeword: int + """Codeword played (by default) when trigger probability is not satisfied.""" + trigger_codeword: int + """Codeword played when trigger probability is satisfied.""" + + # region Class Properties + @property + def codeword_shift(self) -> int: + """ + Example: 3221291008 => 11-0000000-0000001-00-0000000-0000000 + --AWG4- --AWG3- --AWG2- --AWG1- (Note reverse order) + :return: codeword shift based on 32-bit DIO conventions. 
+ """ + return { + 0: 0, + 1: 7, + 2: 16, + 3: 23, + }[self.awg_core_index] + + @property + def stochastic_trigger_bit(self) -> float: + bit_size: int = 7 + return self.codeword_shift + bit_size - 1 + + @property + def codeword_mask(self) -> str: + stochastic_trigger_bit: int = self.stochastic_trigger_bit + return hex(2 ** stochastic_trigger_bit) + + @property + def probability_range(self) -> Tuple[int, int]: + """:return: Integer range for AWG PRNG range.""" + invalid_probability: bool = self.trigger_probability <= 0 or self.trigger_probability > 1 + if invalid_probability: + raise ValueError( + f"Probability must be between 0 and 1 (exclusive of 0.0, instead {self.trigger_probability}") + # lb: int = 0 + # ub: int = max( + # 1, + # min( + # round(1. / self.trigger_probability), + # 2 ** 16, + # ) + # ) - 1 # PRNG range 0 to 2**16 - 1 + # return lb, ub + return 0, 2**16 - 2 + + @property + def probability_cutoff(self) -> int: + return int(round(self.trigger_probability * 2**16)) - 1 + # endregion + + # region Class Methods + def construct_awg_stochastic_program(self) -> str: + """:return: Program for AWG core to run stochastic trigger.""" + prng_lb, prng_ub = self.probability_range + result: str = ( + "var dioIndex = 0;\n" + f"setPRNGRange({prng_lb}, {prng_ub});\n" + f"var prngCutoff = {self.probability_cutoff};\n" + f"var dioMask = {self.codeword_mask};\n" + f"var triggerCodeword = {self.trigger_codeword};\n" + f"var defaultCodeword = {self.default_codeword};\n" + "\n" + "while (1) {\n" + " var prng_value = getPRNGValue();\n" + " // Wait for a trigger on the DIO interface\n" + " waitDIOTrigger();\n" + " // Process DIO trigger signal\n" + " if (getDIO() & dioMask) {\n" # If PRNG bit is active [1XXXXXX] + " if (prng_value < prngCutoff) {\n" # If PRNG value != 0 + " executeTableEntry(triggerCodeword);\n" + " } else {\n" + " executeTableEntry(defaultCodeword);\n" + " }\n" + " } else {\n" + " playWaveDIO();\n" + " }\n" + "}" + ) + return result + # endregion + + 
+class ZI_HDAWG8_PRNG(ZI_HDAWG8): + """ + Behaviour class, driver for ZurichInstruments HDAWG8 instrument. + Codeword program reserves last bit of 7-bit codeword for PRNG trigger. + The behaviour of this trigger is set to do nothing by default and can be updated manually. + An important difference between this class and the parent class is the change in codeword program. + By adding processing logic to the AWG core, the minimal time to receive, process and execute pulses is extended. + Note that the default behaviour of doing 'nothing' keeps the same processing step for timing consistencies. + """ + + # region Class Constructor + def __init__(self, name: str, device: str, interface: str = '1GbE', server: str = 'localhost', port=8004, num_codewords: int = 64, **kw): + super().__init__(name=name, device=device, interface=interface, server=server, port=port, num_codewords=num_codewords, **kw) + identity_codeword: int = 0 + self.awg_stochastic_triggers: Dict[int, StochasticTrigger] = { + 0: StochasticTrigger(awg_core_index=0, trigger_probability=1.0, default_codeword=identity_codeword, trigger_codeword=identity_codeword), + 1: StochasticTrigger(awg_core_index=1, trigger_probability=1.0, default_codeword=identity_codeword, trigger_codeword=identity_codeword), + 2: StochasticTrigger(awg_core_index=2, trigger_probability=1.0, default_codeword=identity_codeword, trigger_codeword=identity_codeword), + 3: StochasticTrigger(awg_core_index=3, trigger_probability=1.0, default_codeword=identity_codeword, trigger_codeword=identity_codeword), + } + # endregion + + # region Class Methods + def get_stochastic_trigger(self, awg_core_index: int) -> StochasticTrigger: + """:return: Stochastic trigger dataclass if awg_core_index exists.""" + return self.awg_stochastic_triggers[awg_core_index] + + def set_awg_stochastic_trigger(self, stochastic_trigger: StochasticTrigger) -> None: + """:sets: stochastic trigger dataclass.""" + awg_core_index: int = stochastic_trigger.awg_core_index + 
allowed_awg_core_indices: List[int] = [0, 1, 2, 3] + if awg_core_index not in allowed_awg_core_indices: + raise ValueError(f"Choice must be within {allowed_awg_core_indices}, instead {awg_core_index}.") + self.awg_stochastic_triggers[awg_core_index] = stochastic_trigger + return None + + def upload_codeword_program(self, awgs: np.ndarray = np.arange(4)): + """ + Generates a program that plays the codeword waves for each channel. + :param awgs: (np.ndarray) the awg numbers to which to upload the codeword program. + """ + self._configure_codeword_protocol() + + # Type conversion to ensure lists do not produce weird results + awgs = np.array(awgs) + if awgs.shape == (): + awgs = np.array([awgs]) + + for awg_nr in awgs: + self._awg_program[awg_nr] = self.awg_stochastic_triggers[awg_nr].construct_awg_stochastic_program() + self._awg_needs_configuration[awg_nr] = True + # endregion diff --git a/pycqed/measurement/VNA_module.py b/pycqed/measurement/VNA_module.py index b1713b36b1..a098547126 100644 --- a/pycqed/measurement/VNA_module.py +++ b/pycqed/measurement/VNA_module.py @@ -68,7 +68,7 @@ def acquire_single_linear_frequency_span(file_name, start_freq=None, VNA_instr.timeout(10**4) t_start = ma.a_tools.current_timestamp() - MC_instr.run(name=file_name) + MC_instr.run(name=file_name, disable_snapshot_metadata=True) MC_instr.soft_avg(old_soft_avg) t_stop = ma.a_tools.current_timestamp() t_meas = ma.a_tools.get_timestamps_in_range(t_start, t_stop, label=file_name) diff --git a/pycqed/measurement/calibration_toolbox.py b/pycqed/measurement/calibration_toolbox.py index eb33d4b26a..7bf55737a2 100644 --- a/pycqed/measurement/calibration_toolbox.py +++ b/pycqed/measurement/calibration_toolbox.py @@ -93,7 +93,8 @@ def mixer_carrier_cancellation( MC.set_sweep_functions([chI_par, chQ_par]) MC.set_detector_function(detector) # sets test_detector MC.set_adaptive_function_parameters(ad_func_pars) - MC.run(name=label, mode='adaptive') + MC.run(name=label, mode='adaptive', + 
disable_snapshot_metadata = disable_metadata) a = ma.OptimizationAnalysis(label=label) # v2 creates a pretty picture of the optimizations ma.OptimizationAnalysis_v2(label=label) @@ -137,7 +138,7 @@ def multi_channel_mixer_carrier_cancellation(SH, source, MC, SH.ref_lvl(SH_ref_level) detector = det.Signal_Hound_fixed_frequency( SH, frequency=(source.frequency()), - Navg=5, delay=0.0, prepare_each_point=False) + Navg=5, delay=0.0, prepare_for_each_point=False) if x0 is None: x0 = [0.0]*len(channel_pars) @@ -145,7 +146,7 @@ def multi_channel_mixer_carrier_cancellation(SH, source, MC, ad_func_pars = {'adaptive_function': nelder_mead, 'x0': x0, 'initial_step': [init_stepsize]*len(channel_pars), - 'no_improv_break': 15, + # 'no_improv_break': 15, 'minimize': True, 'maxiter': 500} MC.set_sweep_functions(channel_pars) diff --git a/pycqed/measurement/composite_detector_functions.py b/pycqed/measurement/composite_detector_functions.py index e66b308bcd..a16f688ce4 100644 --- a/pycqed/measurement/composite_detector_functions.py +++ b/pycqed/measurement/composite_detector_functions.py @@ -77,7 +77,7 @@ # # def __init__(self, measurement_name, MC, AWG, acquisition_instr, # pulse_pars, RO_pars, raw=True, analyze=True, upload=True, -# IF=None, weight_function_I=0, weight_function_Q=1, +# IF=None, weight_chI=0, weight_chQ=1, # optimized_weights=False, one_weight_function_UHFQC=False, # wait=0.0, close_fig=True, SSB=False, # nr_averages=1024, integration_length=1e-6, @@ -158,8 +158,8 @@ # nr_shots=min(self.nr_shots, 4094))) # if self.SSB: # self.UHFQC.prepare_SSB_weight_and_rotation( -# IF=self.IF, weight_function_I=self.weight_function_I, -# weight_function_Q=self.weight_function_Q) +# IF=self.IF, weight_chI=self.weight_function_I, +# weight_chQ=self.weight_function_Q) # else: # if self.IF == None: # raise ValueError( @@ -167,8 +167,8 @@ # else: # self.UHFQC.prepare_DSB_weight_and_rotation( # IF=self.IF, -# weight_function_I=self.weight_function_I, -# 
weight_function_Q=self.weight_function_Q) +# weight_chI=self.weight_function_I, +# weight_chQ=self.weight_function_Q) # elif 'DDM' in str(self.acquisition_instr): # self.MC.set_detector_function( # det.DDM_integration_logging_det( @@ -179,8 +179,8 @@ # nr_shots=min(self.nr_shots, 8000))) # if self.SSB: # self.DDM.prepare_SSB_weight_and_rotation( -# IF=self.IF, weight_function_I=self.weight_function_I, -# weight_function_Q=self.weight_function_Q) +# IF=self.IF, weight_chI=self.weight_function_I, +# weight_chQ=self.weight_function_Q) # #not yet implemented # # else: # # if self.IF == None: @@ -189,8 +189,8 @@ # # else: # # self.UHFQC.prepare_DSB_weight_and_rotation( # # IF=self.IF, -# # weight_function_I=self.weight_function_I, -# # weight_function_Q=self.weight_function_Q) +# # weight_chI=self.weight_function_I, +# # weight_chQ=self.weight_function_Q) # # def acquire_data_point(self, *args, **kw): # self.time_start = time.time() diff --git a/pycqed/measurement/cz_cost_functions.py b/pycqed/measurement/cz_cost_functions.py index e4c578c487..75fbc1b876 100644 --- a/pycqed/measurement/cz_cost_functions.py +++ b/pycqed/measurement/cz_cost_functions.py @@ -234,6 +234,7 @@ def parity_check_cost_function( ramsey_qubits: Union[List[str], bool]=False, refocusing: bool=True, phase_offsets: List[float]=None, + pc_repetitions: int=1, phase_weight_factor: float=1, include_missing_frac_cost: bool=False, wait_time_before_flux_ns: int=0, @@ -274,6 +275,7 @@ def parity_check_cost_function( wait_time_after_flux_ns=wait_time_after_flux_ns, label_suffix=counter_param(), disable_metadata=disable_metadata, + pc_repetitions=pc_repetitions, plotting=plotting, **kwargs ) diff --git a/pycqed/measurement/detector_functions.py b/pycqed/measurement/detector_functions.py index d164e03b0a..2ce7bf7ac4 100644 --- a/pycqed/measurement/detector_functions.py +++ b/pycqed/measurement/detector_functions.py @@ -4,22 +4,41 @@ NB: hardware-specific detectors have been split-off in separate files, see the 
compatibility imports below ''' +import qcodes as qc import numpy as np +import numpy.fft as fft import logging import time +from string import ascii_uppercase +from packaging import version from deprecated import deprecated from pycqed.analysis.fit_toolbox import functions as fn # compatibility imports for functions that were moved under directory det_funcs. New code should use new locations -from pycqed.measurement.det_fncs.Base import Detector_Function, Mock_Detector, Multi_Detector, Soft_Detector, \ - Hard_Detector -from pycqed.measurement.det_fncs.hard.UHFQC import Multi_Detector_UHF, \ - UHFQC_input_average_detector, UHFQC_demodulated_input_avg_det, \ - UHFQC_spectroscopy_detector, UHFQC_integrated_average_detector, UHFQC_correlation_detector, \ - UHFQC_integration_logging_det, UHFQC_statistics_logging_det, UHFQC_single_qubit_statistics_logging_det -from pycqed.measurement.det_fncs.hard.SignalHound import Signal_Hound_fixed_frequency, Signal_Hound_sweeped_frequency, \ - SH_mixer_skewness_det +from pycqed.measurement.det_fncs.Base import ( + Detector_Function, + Mock_Detector, + Multi_Detector, + Soft_Detector, + Hard_Detector, +) +from pycqed.measurement.det_fncs.hard.UHFQC import ( + Multi_Detector_UHF, + UHFQC_input_average_detector, + UHFQC_demodulated_input_avg_det, + UHFQC_spectroscopy_detector, + UHFQC_integrated_average_detector, + UHFQC_correlation_detector, + UHFQC_integration_logging_det, + UHFQC_statistics_logging_det, + UHFQC_single_qubit_statistics_logging_det, +) +from pycqed.measurement.det_fncs.hard.SignalHound import ( + Signal_Hound_fixed_frequency, + Signal_Hound_sweeped_frequency, + SH_mixer_skewness_det, +) from qcodes.instrument.parameter import _BaseParameter @@ -27,6 +46,29 @@ log = logging.getLogger(__name__) +############################################################################### +############################################################################### +#################### None Detector #################### 
+############################################################################### +############################################################################### + + +class None_Detector(Detector_Function): + + def __init__(self, **kw): + super(None_Detector, self).__init__() + self.detector_control = 'soft' + self.set_kw() + self.name = 'None_Detector' + self.value_names = ['None'] + self.value_units = ['None'] + + def acquire_data_point(self, **kw): + ''' + Returns something random for testing + ''' + return np.random.random() + ########################################################################## ########################################################################## #################### Hardware Controlled Detectors ############### @@ -400,8 +442,7 @@ def acquire_single_data_point(self, **kw): c = 0 while (not passed): S21 = self.HS.probe() - cond_a = ( - abs(S21) / self.last > self.threshold) or (self.last / abs(S21) > self.threshold) + cond_a = (abs(S21) / self.last > self.threshold) or (self.last / abs(S21) > self.threshold) cond_b = self.HS.frequency() > self.last_frequency if cond_a and cond_b: passed = False diff --git a/pycqed/measurement/error_injection_experiments/__init__.py b/pycqed/measurement/error_injection_experiments/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pycqed/measurement/error_injection_experiments/coherent_vs_stochastic_allxy.py b/pycqed/measurement/error_injection_experiments/coherent_vs_stochastic_allxy.py new file mode 100644 index 0000000000..28de3244c9 --- /dev/null +++ b/pycqed/measurement/error_injection_experiments/coherent_vs_stochastic_allxy.py @@ -0,0 +1,128 @@ +# ------------------------------------------- +# Module containing measurement functionality that attempts to setup coherent vs. stochastic measurement error. 
+# ------------------------------------------- +import numpy as np +import logging +from pycqed.measurement.openql_experiments import single_qubit_oql as sqo +from pycqed.measurement import sweep_functions as swf +from pycqed.analysis import measurement_analysis as ma +from pycqed.analysis_v2 import measurement_analysis as ma2 +from pycqed.measurement.openql_experiments.openql_helpers import OqlProgram +from pycqed.instrument_drivers.meta_instrument.qubit_objects.CCL_Transmon import CCLight_Transmon as Transmon +from pycqed.instrument_drivers.meta_instrument.LutMans.mw_lutman import AWG8_MW_LutMan +from pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_HDAWG8 import ZI_HDAWG8 +from pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_HDAWG8_PRNG import ZI_HDAWG8_PRNG + + +log = logging.getLogger(__name__) + + +def allxy_default_coherent_and_stochastic( + qubit_idx: int, + platf_cfg: str, + coherence_codeword: str, + stochastic_codeword: str = 'PRNG', + double_points: bool = True, + prepend_msmt=False, + wait_time_after_prepend_msmt=0 + ): + """ + Single qubit AllXY sequence. + Writes output files to the directory specified in openql. + Output directory is set as an attribute to the program for convenience. 
+ + Input pars: + qubit_idx: int specifying the target qubit (starting at 0) + platf_cfg: filename of the platform config file + double_points: if true repeats every element twice + intended for evaluating the noise at larger time scales + Returns: + p: OpenQL Program object + + + """ + p = OqlProgram("AllXY_default_coherent_stochastic", platf_cfg) + normal_rotation_cw: str = stochastic_codeword # stochastic_codeword # coherence_codeword # 'rx180' + + allXY = [['i', 'i'], [normal_rotation_cw, normal_rotation_cw], ['ry180', 'ry180'], + [normal_rotation_cw, 'ry180'], ['ry180', normal_rotation_cw], + ['rx90', 'i'], ['ry90', 'i'], ['rx90', 'ry90'], + ['ry90', 'rx90'], ['rx90', 'ry180'], ['ry90', normal_rotation_cw], + [normal_rotation_cw, 'ry90'], ['ry180', 'rx90'], ['rx90', normal_rotation_cw], + [normal_rotation_cw, 'rx90'], ['ry90', 'ry180'], ['ry180', 'ry90'], + [normal_rotation_cw, 'i'], ['ry180', 'i'], ['rx90', 'rx90'], + ['ry90', 'ry90']] + + # this should be implicit + if 0: # FIXME: p.set_sweep_points has been replaced by p.sweep_points, since that was missing here they are probably not necessary for this function + p.set_sweep_points(np.arange(len(allXY), dtype=float)) + + for i, xy in enumerate(allXY): + if double_points: + js = 2 + else: + js = 1 + for j in range(js): + k = p.create_kernel("AllXY_{}_{}".format(i, j)) + k.prepz(qubit_idx) + if prepend_msmt: + k.measure(qubit_idx) + if wait_time_after_prepend_msmt: + k.gate("wait", [qubit_idx], wait_time_after_prepend_msmt) + k.gate("wait", []) + k.gate(xy[0], [qubit_idx]) + k.gate(xy[1], [qubit_idx]) + k.measure(qubit_idx) + p.add_kernel(k) + + p.compile() + return p + + +def measure_stochastic_allxy( + transmon: Transmon, + MC=None, + label: str = '', + analyze=True, + close_fig=True, + prepare_for_timedomain=True, + prepend_msmt: bool = False, + wait_time_after_prepend_msmt: int = 0, + disable_metadata=False, +): + if MC is None: + MC = transmon.instr_MC.get_instr() + instr_lutman: AWG8_MW_LutMan = 
transmon.instr_LutMan_MW.get_instr() + instr_hdawg: ZI_HDAWG8 = instr_lutman.find_instrument(instr_lutman.AWG()) + + coherent_codeword: str = "rx175" + coherent_rotation: int = 175 + lutmap_index: int = 31 + instr_lutman.LutMap()[lutmap_index] = {"name": coherent_codeword, "theta": coherent_rotation, "phi": 0, "type": "ge"} + log.warn(f"Overwriting {instr_lutman} lutmap, at index: {lutmap_index} with key: {coherent_codeword}.") + instr_lutman.load_waveforms_onto_AWG_lookuptable() + assert isinstance(instr_hdawg, ZI_HDAWG8_PRNG), "Requires codeword program that handles stochastic processing" + + if prepare_for_timedomain: + transmon.prepare_for_timedomain() + p = allxy_default_coherent_and_stochastic( + qubit_idx=transmon.cfg_qubit_nr(), + platf_cfg=transmon.cfg_openql_platform_fn(), + coherence_codeword=coherent_codeword, + stochastic_codeword='PRNG', + double_points=True, + prepend_msmt=prepend_msmt, + wait_time_after_prepend_msmt=wait_time_after_prepend_msmt + ) + s = swf.OpenQL_Sweep( + openql_program=p, + CCL=transmon.instr_CC.get_instr(), + ) + d = transmon.int_avg_det + MC.set_sweep_function(s) + MC.set_sweep_points(np.arange(42) if not prepend_msmt else np.arange( 2 *42)) + MC.set_detector_function(d) + MC.run('AllXY ' +transmon.msmt_suffix +label, disable_snapshot_metadata=disable_metadata) + if analyze: + a = ma.AllXY_Analysis(close_main_fig=close_fig, prepend_msmt=prepend_msmt) + return a diff --git a/pycqed/measurement/error_injection_experiments/coherent_vs_stochastic_flipping.py b/pycqed/measurement/error_injection_experiments/coherent_vs_stochastic_flipping.py new file mode 100644 index 0000000000..48921645cc --- /dev/null +++ b/pycqed/measurement/error_injection_experiments/coherent_vs_stochastic_flipping.py @@ -0,0 +1,183 @@ +# ------------------------------------------- +# Module containing measurement functionality that attempts to setup coherent vs. stochastic measurement error. 
+# ------------------------------------------- +import numpy as np +import logging +from pycqed.measurement.openql_experiments import single_qubit_oql as sqo +from pycqed.measurement import sweep_functions as swf +from pycqed.analysis_v2 import measurement_analysis as ma2 +from pycqed.measurement.openql_experiments.openql_helpers import OqlProgram +from pycqed.instrument_drivers.meta_instrument.qubit_objects.CCL_Transmon import CCLight_Transmon as Transmon +from pycqed.instrument_drivers.meta_instrument.LutMans.mw_lutman import AWG8_MW_LutMan +from pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_HDAWG8 import ZI_HDAWG8 +from pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_HDAWG8_PRNG import ZI_HDAWG8_PRNG + + +log = logging.getLogger(__name__) + + + +def flipping_default_coherent_and_stochastic( + qubit_idx: int, + number_of_flips, + platf_cfg: str, + coherence_codeword: str, + stochastic_codeword: str = 'PRNG', + equator: bool = True, + cal_points: bool = True, + ax: str = 'x', + angle: str = '180', + ) -> OqlProgram: + """ + Generates a flipping sequence that performs multiple pi-pulses + Basic sequence: + - (X)^n - RO + or + - (Y)^n - RO + or + - (X90)^2n - RO + or + - (Y90)^2n - RO + + + Input pars: + qubit_idx: int specifying the target qubit (starting at 0) + number_of_flips: array of ints specifying the sweep points + platf_cfg: filename of the platform config file + equator: if True add an extra pi/2 pulse at the end to + make the state end at the equator. 
+ cal_points: replaces last 4 points by calibration points + + Returns: + p: OpenQL Program object + """ + p = OqlProgram("flipping_default_coherent_stochastic", platf_cfg) + normal_rotation_cw: str = stochastic_codeword # coherence_codeword # 'rx180' + + for i, n in enumerate(number_of_flips): + k = p.create_kernel('flipping_{}'.format(i)) + k.prepz(qubit_idx) + if cal_points and (i == (len(number_of_flips)-4) or i == (len(number_of_flips)-3)): + k.measure(qubit_idx) + elif cal_points and (i == (len(number_of_flips)-2) or i == (len(number_of_flips)-1)): + if ax == 'y': + k.gate('ry180', [qubit_idx]) + else: + k.gate(normal_rotation_cw, [qubit_idx]) + k.measure(qubit_idx) + else: + if equator: + if ax == 'y': + k.gate('ry90', [qubit_idx]) + else: + k.gate('rx90', [qubit_idx]) + + for j in range(n): + if ax == 'y' and angle == '90': + k.gate('ry90', [qubit_idx]) + k.gate('ry90', [qubit_idx]) + elif ax == 'y' and angle == '180': + k.gate('ry180', [qubit_idx]) + elif angle == '90': + k.gate('rx90', [qubit_idx]) + k.gate('rx90', [qubit_idx]) + else: + k.gate(normal_rotation_cw, [qubit_idx]) + k.measure(qubit_idx) + p.add_kernel(k) + + p.compile() + return p + + +def measure_stochastic_flipping( + transmon: Transmon, + number_of_flips = np.arange(0, 61, 2), + equator = True, + prepare_for_timedomain = True, + MC = None, + ax = 'x', + angle = '180', + label = '', + disable_metadata = False + ): + """ + Measurement for fine-tuning of the pi and pi/2 pulse amplitudes. Executes sequence + pi (repeated N-times) - pi/2 - measure + with variable number N. In this way the error in the amplitude of the MW pi pulse + accumulate allowing for fine tuning. Alternatively N repetitions of the pi pulse + can be replaced by 2N repetitions of the pi/2-pulse + + Args: + number_of_flips (array): + number of pi pulses to apply. It is recommended to use only even numbers, + since then the expected signal has a sine shape. 
Otherwise it has -1^N * sin shape + which will not be correctly analyzed. + + equator (bool); + specify whether to apply the final pi/2 pulse. Setting to False makes the sequence + first-order insensitive to pi-pulse amplitude errors. + + ax (str {'x', 'y'}): + axis arour which the pi pulses are to be performed. Possible values 'x' or 'y' + + angle (str {'90', '180'}):r + specifies whether to apply pi or pi/2 pulses. Possible values: '180' or '90' + + update (bool): + specifies whether to update parameter controlling MW pulse amplitude. + This parameter is mw_vsm_G_amp in VSM case or mw_channel_amp in no-VSM case. + Update is performed only if change by more than 0.2% (0.36 deg) is needed. + """ + + if MC is None: + MC = transmon.instr_MC.get_instr() + instr_lutman: AWG8_MW_LutMan = transmon.instr_LutMan_MW.get_instr() + instr_hdawg: ZI_HDAWG8 = instr_lutman.find_instrument(instr_lutman.AWG()) + + coherent_codeword: str = "rx175" + coherent_rotation: int = 175 + lutmap_index: int = 31 + instr_lutman.LutMap()[lutmap_index] = {"name": coherent_codeword, "theta": coherent_rotation, "phi": 0, "type": "ge"} + instr_lutman.LutMap()[32] = {"name": "rx170", "theta": 180, "phi": 0, "type": "ge"} + log.warn(f"Overwriting {instr_lutman} lutmap, at index: {lutmap_index} with key: {coherent_codeword}.") + instr_lutman.load_waveforms_onto_AWG_lookuptable() + + # allow flipping only with pi/2 or pi, and x or y pulses + assert angle in ['90', '180'] + assert ax.lower() in ['x', 'y'] + assert isinstance(instr_hdawg, ZI_HDAWG8_PRNG), "Requires codeword program that handles stochastic processing" + + # append the calibration points, times are for location in plot + nf = np.array(number_of_flips) + dn = nf[1] - nf[0] + nf = np.concatenate([nf, + (nf[-1] + 1 * dn, + nf[-1] + 2 * dn, + nf[-1] + 3 * dn, + nf[-1] + 4 * dn)]) + if prepare_for_timedomain: + transmon.prepare_for_timedomain() + p = flipping_default_coherent_and_stochastic( + number_of_flips=nf, + equator = equator, + 
qubit_idx = transmon.cfg_qubit_nr(), + platf_cfg = transmon.cfg_openql_platform_fn(), + coherence_codeword=coherent_codeword, + stochastic_codeword='PRNG', + ax = ax.lower(), + angle = angle, + ) + s = swf.OpenQL_Sweep( + openql_program=p, + unit = '#', + CCL = transmon.instr_CC.get_instr() + ) + d = transmon.int_avg_det + MC.set_sweep_function(s) + MC.set_sweep_points(nf) + MC.set_detector_function(d) + MC.run('flipping_' + ax + angle + label + transmon.msmt_suffix, disable_snapshot_metadata = disable_metadata) + + a = ma2.FlippingAnalysis(options_dict = {'scan_label': 'flipping'}) + return a diff --git a/pycqed/measurement/error_injection_experiments/stochastic_ramsey.py b/pycqed/measurement/error_injection_experiments/stochastic_ramsey.py new file mode 100644 index 0000000000..1313df16dd --- /dev/null +++ b/pycqed/measurement/error_injection_experiments/stochastic_ramsey.py @@ -0,0 +1,131 @@ +# ------------------------------------------- +# Module containing measurement functionality that attempts to setup coherent vs. stochastic measurement error. 
+# ------------------------------------------- +import numpy as np +import logging +from typing import List +from pycqed.measurement.openql_experiments import single_qubit_oql as sqo +from pycqed.measurement import sweep_functions as swf +from pycqed.analysis import measurement_analysis as ma +from pycqed.analysis_v2 import measurement_analysis as ma2 +from pycqed.measurement.openql_experiments.openql_helpers import OqlProgram +from pycqed.instrument_drivers.meta_instrument.qubit_objects.CCL_Transmon import CCLight_Transmon as Transmon +from pycqed.instrument_drivers.meta_instrument.LutMans.mw_lutman import AWG8_MW_LutMan +from pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_HDAWG8 import ZI_HDAWG8 +from pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_HDAWG8_PRNG import ZI_HDAWG8_PRNG +from pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_HDAWG8_PRNG import ( + ZI_HDAWG8_PRNG, + StochasticTrigger, +) + + +log = logging.getLogger(__name__) + + +def ramsey_stochastic( + qubit_idx: int, + platf_cfg: str, + probabilities: List[float], + stochastic_codeword: str = 'PRNG', + ): + """ + """ + p = OqlProgram("Ramsey_stochastic", platf_cfg) + + for i, probs in enumerate(probabilities[:-4]): + k = p.create_kernel("Ramsey_{}".format(i)) + k.prepz(qubit_idx) + k.gate('rx90', [qubit_idx]) + # wait_nanoseconds = int(round(time/1e-9)) + k.gate(stochastic_codeword, [qubit_idx]) + # k.gate("wait", [qubit_idx], wait_nanoseconds) + k.gate('rx90', [qubit_idx]) + k.measure(qubit_idx) + p.add_kernel(k) + + # adding the calibration points + p.add_single_qubit_cal_points(qubit_idx=qubit_idx) + + p.compile() + return p + + +class OpenQLSweepUpdateHDAWG(swf.Soft_Sweep): + + def __init__(self, openql_program, CCL, awg: ZI_HDAWG8_PRNG, default_codeword: int, trigger_codeword: int, + parameter_name: str ='Points', unit: str='a.u.', + upload: bool=True): + super().__init__() + self.name = 'OpenQL_Sweep' + self.openql_program = 
openql_program + self.CCL = CCL + self.upload = upload + self.parameter_name = parameter_name + self.sweep_control = 'soft' + self.unit = unit + self.awg = awg + self.default_codeword = default_codeword + self.trigger_codeword = trigger_codeword + + def prepare(self, **kw): + print('Hi we prepare here') + if self.upload: + self.CCL.eqasm_program(self.openql_program.filename) + + def set_parameter(self, val): + print('Hi we set-value here', f'probability: {val}') + awg_sequencer_nr = 2 # X1 -> AWG8_8499 + stochastic_trigger_info = StochasticTrigger( + awg_core_index=awg_sequencer_nr, + trigger_probability=min(1.0, val), + default_codeword=self.default_codeword, + trigger_codeword=self.trigger_codeword, + ) + self.awg.set_awg_stochastic_trigger(stochastic_trigger_info) + self.awg.upload_codeword_program() + + +def measure_stochastic_ramsey( + transmon: Transmon, + label: str = '', + MC=None, + prepare_for_timedomain=True, + disable_metadata=False, +): + """ + """ + if MC is None: + MC = transmon.instr_MC.get_instr() + + # append the calibration points, times are for location in plot + probabilities = np.linspace(0.01, 1.0, 10) + probabilities = np.concatenate([probabilities, [1.0] * 4]) + + awg = transmon.instr_LutMan_MW.get_instr().AWG.get_instr() + transmon.ro_acq_averages(2**11) + + if prepare_for_timedomain: + transmon.prepare_for_timedomain() + + p = ramsey_stochastic( + qubit_idx=transmon.cfg_qubit_nr(), + platf_cfg=transmon.cfg_openql_platform_fn(), + probabilities=probabilities + ) + + s = OpenQLSweepUpdateHDAWG( + openql_program=p, + CCL=transmon.instr_CC.get_instr(), + awg=awg, + default_codeword=0, + trigger_codeword=1, + parameter_name='Time', + unit='s', + ) + + MC.set_sweep_function(s) + MC.set_sweep_points(probabilities) + MC.set_detector_function(transmon.int_avg_det) + print(MC.get_datawriting_start_idx()) + response = MC.run('Ramsey_stochastic' + label + transmon.msmt_suffix, disable_snapshot_metadata=disable_metadata) + return response diff --git 
a/pycqed/measurement/flux_crosstalk_ac/measurement_function.py b/pycqed/measurement/flux_crosstalk_ac/measurement_function.py new file mode 100644 index 0000000000..7324f40bf7 --- /dev/null +++ b/pycqed/measurement/flux_crosstalk_ac/measurement_function.py @@ -0,0 +1,284 @@ +# ------------------------------------------- +# Module describing the measurement functionality used for AC flux-crosstalk experiment +# ------------------------------------------- +from abc import ABC, abstractmethod +import os +import functools +import numpy as np +import warnings +from typing import TypeVar, Type, Dict, Optional +from pycqed.instrument_drivers.meta_instrument.device_object_CCL import DeviceCCL as Device +from pycqed.instrument_drivers.meta_instrument.qubit_objects.CCL_Transmon import CCLight_Transmon as Transmon +from pycqed.measurement.measurement_control import MeasurementControl as MeasurementControl +from pycqed.instrument_drivers.physical_instruments.QuTech.CC import CC as CentralControl +from pycqed.instrument_drivers.meta_instrument.LutMans.flux_lutman_vcz import HDAWG_Flux_LutMan as FluxLutMan +from pycqed.measurement.sweep_functions import ( + FLsweep as FluxSweepFunctionObject, + OpenQL_Sweep as OpenQLSweep, +) +from pycqed.measurement.flux_crosstalk_ac.schedule import ( + OqlProgram, + schedule_flux_crosstalk, + schedule_ramsey, +) +import pycqed.instrument_drivers.meta_instrument.Surface17_dependency_graph as S17GBT +from pycqed.analysis.analysis_toolbox import get_datafilepath_from_timestamp +from pycqed.analysis_v2.base_analysis import BaseDataAnalysis +import pycqed.measurement.hdf5_data as hd5 +import matplotlib.pyplot as plt +from pycqed.qce_utils.custom_exceptions import InterfaceMethodException + + +class IBaseDataAnalysis(ABC): + + # region Interface Methods + @abstractmethod + def process_data(self): + """ + process_data: overloaded in child classes, + takes care of mundane tasks such as binning filtering etc + """ + raise InterfaceMethodException + + 
@abstractmethod + def prepare_plots(self): + """ + Defines a default plot by setting up the plotting dictionaries to + specify what is to be plotted + """ + raise InterfaceMethodException + + @abstractmethod + def analyze_fit_results(self): + """ + Do analysis on the results of the fits to extract quantities of + interest. + """ + raise InterfaceMethodException + # endregion + + +def plot_example(x_array: np.ndarray, y_array: np.ndarray, ax: Optional[plt.Axes] = None, **kwargs): + # Get keyword argument defaults + x_label: str = kwargs.pop('x_label', 'Default Label [a.u.]') + y_label: str = kwargs.pop('y_label', 'Default Label [a.u.]') + # Plot figure + print(kwargs) + if ax is None: + fig, ax = plt.subplots() + ax.plot( + x_array, + y_array, + '.-', + ) + ax.grid(True, alpha=0.5, linestyle='dashed') + ax.set_axisbelow(True) + ax.set_xlabel(x_label) + ax.set_ylabel(y_label) + +TBaseDataAnalysis = TypeVar('TBaseDataAnalysis', bound=BaseDataAnalysis) + +class FluxCrosstalkAnalysis(BaseDataAnalysis, IBaseDataAnalysis): + + # region Class Constructor + def __init__(self, t_start: str = None, t_stop: str = None, label: str = '', data_file_path: str = None, close_figs: bool = True, options_dict: dict = None, extract_only: bool = False, do_fitting: bool = False, save_qois: bool = True): + super().__init__(t_start, t_stop, label, data_file_path, close_figs, options_dict, extract_only, do_fitting, save_qois) + self.params_dict: Dict = {} + self.numeric_params: Dict = {} + # endregion + + # region Interface Methods + def extract_data(self): + """ + This is a new style (sept 2019) data extraction. + This could at some point move to a higher level class. 
+ """ + self.get_timestamps() + self.timestamp = self.timestamps[0] + data_filepath = get_datafilepath_from_timestamp(self.timestamp) + self._raw_data_key: str = 'data' + param_spec = { + self._raw_data_key: ('Experimental Data/Data', 'dset'), + 'value_names': ('Experimental Data', 'attr:value_names'), + } + self.raw_data_dict = hd5.extract_pars_from_datafile(data_filepath, param_spec) + self.raw_data_dict['timestamps'] = self.timestamps + self.raw_data_dict['folder'] = os.path.split(data_filepath)[0] + + def process_data(self): + """ + process_data: overloaded in child classes, + takes care of mundane tasks such as binning filtering etc + """ + self.data_array: np.ndarray = self.raw_data_dict[self._raw_data_key] + + def prepare_plots(self): + """ + Defines a default plot by setting up the plotting dictionaries to + specify what is to be plotted + """ + self.plot_dicts['Example_plot'] = { + 'plotfn': plot_example, + 'x_array': self.data_array[:, 0], + 'y_array': self.data_array[:, 1], + 'x_label': 'Times [s]', + } + + def analyze_fit_results(self): + """ + Do analysis on the results of the fits to extract quantities of + interest. + """ + # raise NotImplemented + pass + # endregion + + +def decorator_run_analysis(analysis_class: Type[TBaseDataAnalysis], filter_label: str): + + def decorator(func): + """ + Decorator that constructs analysis and triggers execution. + """ + @functools.wraps(func) + def wrapper(*args, **kwargs): + result = func(*args, **kwargs) + + # Construct analysis class + instance: TBaseDataAnalysis = analysis_class(label=filter_label) + instance.run_analysis() + + return result + return wrapper + + return decorator + + +def decorator_pyplot_dataset(dataset_key: str, array_dimensions: int, show_figure: bool, *plot_args, **plot_kwargs): + + def decorator(func): + """ + Decorator to measure and record the execution time of the decorated function or method. 
+ """ + @functools.wraps(func) + def wrapper(*args, **kwargs): + result = func(*args, **kwargs) + # Assumes dictionary structure + dataset_key_exists: bool = dataset_key in result and isinstance(result[dataset_key], np.ndarray) + # Guard clause, if dataset key does not exists + if not dataset_key_exists: + warnings.warn(f"Key: {dataset_key} is not present in function output.") + return result + + data_array: np.ndarray = result[dataset_key] + # Get keyword argument defaults + x_label: str = plot_kwargs.pop('x_label', 'Default Label [a.u.]') + y_label: str = plot_kwargs.pop('y_label', 'Default Label [a.u.]') + # Plot figure + fig, ax = plt.subplots(*plot_args, **plot_kwargs) + ax.plot( + data_array[:, 0], + data_array[:, 1], + '.-', + ) + ax.grid(True, alpha=0.5, linestyle='dashed') + ax.set_axisbelow(True) + ax.set_xlabel(x_label) + ax.set_ylabel(y_label) + + if show_figure: + plt.show() + else: + plt.close(fig) + + return result + return wrapper + + return decorator + + +# @decorator_run_analysis( +# analysis_class=FluxCrosstalkAnalysis, +# filter_label='FluxCrosstalk', +# ) +def measure_ac_flux_crosstalk(device: Device, qubit_echo_id: str, qubit_target_id: str, prepare_for_timedomain: bool = False, disable_metadata: bool = True): + """ + Performs an experiment + """ + # Data allocation + qubit_echo: Transmon = device.find_instrument(qubit_echo_id) + qubit_target: Transmon = device.find_instrument(qubit_target_id) + flux_lutman_echo: FluxLutMan = qubit_echo.instr_LutMan_Flux.get_instr() + flux_lutman_target: FluxLutMan = qubit_target.instr_LutMan_Flux.get_instr() + + # Sweep parameters + target_amplitudes: np.ndarray = np.linspace(-0.5, 0.5, 7) + flux_pulse_duration: int = 60 + 20 # [ns] + echo_detuning: np.ndarray = np.linspace(20e6, 100e6, 7) + echo_amplitudes: np.ndarray = S17GBT.get_DAC_amp_frequency(echo_detuning, flux_lutman_echo, negative_amp=False) + # echo_amplitude_channel: float = S17GBT.get_Ch_amp_frequency(max(abs(echo_detuning)), flux_lutman_echo, 
DAC_param='sq_amp') + print('echo detuning: ', echo_detuning) + print('echo amplitudes: ', echo_amplitudes) + + # Update lutmans + flux_lutman_echo.sq_length((flux_pulse_duration - 20) * 1e-9) + flux_lutman_target.sq_length((flux_pulse_duration - 20) * 1e-9) + flux_lutman_echo.load_waveform_onto_AWG_lookuptable("sq_length", regenerate_waveforms=True) + flux_lutman_target.load_waveform_onto_AWG_lookuptable("sq_length", regenerate_waveforms=True) + + # flux_pulse_amplitude: np.ndarray = np.asarray([flux_lutman.sq_amp()]) + flux_cw = "sf_square" + meas_control: MeasurementControl = device.instr_MC.get_instr() + central_control: CentralControl = device.instr_CC.get_instr() + + # Prepare for time-domain if requested + if prepare_for_timedomain: + device.prepare_for_timedomain(qubits=[qubit_echo_id, qubit_target_id]) + + schedule: OqlProgram = schedule_flux_crosstalk( + qubit_echo_index=qubit_echo.cfg_qubit_nr(), + qubit_target_index=qubit_target.cfg_qubit_nr(), + flux_pulse_cw=flux_cw, # Square pulse? 
+ platf_cfg=device.cfg_openql_platform_fn(), + half_echo_delay_ns=flux_pulse_duration, # [ns] + ) + sweep_function_echo: FluxSweepFunctionObject = FluxSweepFunctionObject( + flux_lutman_echo, + flux_lutman_echo.sq_amp, # Square length (qcodes) parameter + amp_for_generation=None, + waveform_name="square", # Meaning we are going to sweep the square-pulse parameters only + ) + sweep_function_target: FluxSweepFunctionObject = FluxSweepFunctionObject( + flux_lutman_target, + flux_lutman_target.sq_amp, # Square length (qcodes) parameter + amp_for_generation=None, + waveform_name="square", # Meaning we are going to sweep the square-pulse parameters only + ) + + # flux_pulse_duration = np.concatenate([flux_pulse_duration, np.zeros(shape=4)]) # Include calibration points + central_control.eqasm_program(schedule.filename) + central_control.start() + + detector = device.get_int_avg_det( + qubits=[qubit_echo_id], + single_int_avg=True, + always_prepare=True, + ) + + meas_control.set_sweep_functions([sweep_function_echo, sweep_function_target]) + meas_control.set_sweep_points(np.column_stack([echo_amplitudes, target_amplitudes])) + + meas_control.set_detector_function(detector) + label = f'FluxCrosstalk_{qubit_echo.name}' + result = meas_control.run(label, disable_snapshot_metadata=disable_metadata) + + return result + + +if __name__ == "__main__": + from pycqed.measurement.flux_crosstalk_ac import measurement_function as fluxcross_module + reload(fluxcross_module) + + fluxcross_module.measure_ac_flux_crosstalk( + device, + ) \ No newline at end of file diff --git a/pycqed/measurement/flux_crosstalk_ac/schedule.py b/pycqed/measurement/flux_crosstalk_ac/schedule.py new file mode 100644 index 0000000000..fb315e3feb --- /dev/null +++ b/pycqed/measurement/flux_crosstalk_ac/schedule.py @@ -0,0 +1,44 @@ +# ------------------------------------------- +# Module describing the OpenQL schedule used for AC flux-crosstalk experiment +# ------------------------------------------- +import 
numpy as np +from pycqed.measurement.openql_experiments.openql_helpers import OqlProgram + + +def schedule_flux_crosstalk( + qubit_echo_index: int, + qubit_target_index: int, + flux_pulse_cw: str, + platf_cfg: str = '', + half_echo_delay_ns: int = 0, # Delay in ns with respect to end of qubit gate + ) -> OqlProgram: + """ + """ + + p = OqlProgram("FluxCrosstalk", platf_cfg) + + # Echo kernel + k = p.create_kernel("echo") + k.prepz([qubit_echo_index, qubit_target_index]) + k.barrier([]) + # Echo part 1 + k.gate('rx90', [qubit_echo_index]) + k.gate('wait', [], half_echo_delay_ns) + k.barrier([]) + # Echo part 2 + k.gate('ry180', [qubit_echo_index]) + k.gate(flux_pulse_cw, [qubit_echo_index]) + k.gate(flux_pulse_cw, [qubit_target_index]) + k.barrier([]) # alignment workaround + k.gate('wait', [], half_echo_delay_ns) + k.gate('ry90', [qubit_echo_index]) + k.barrier([]) + # Measure + k.measure(qubit_echo_index) + p.add_kernel(k) + + # Calibration kernel + p.add_single_qubit_cal_points(qubit_idx=qubit_echo_index) + + p.compile() + return p diff --git a/pycqed/measurement/openql_experiments/clifford_rb_oql.py b/pycqed/measurement/openql_experiments/clifford_rb_oql.py index e2529aa76b..049599d9f0 100644 --- a/pycqed/measurement/openql_experiments/clifford_rb_oql.py +++ b/pycqed/measurement/openql_experiments/clifford_rb_oql.py @@ -501,179 +501,6 @@ def randomized_benchmarking( return p -# def two_qubit_randomized_benchmarking( -# two_qubit_pair: list, -# single_qubits: list, -# platf_cfg: str, -# nr_cliffords, -# nr_seeds: int, -# two_qubit_net_cliffords: list = [0], -# single_qubit_net_cliffords: list = [0], -# max_clifford_idx: int = 11520, -# flux_codeword: str = "cz", -# flux_allocated_duration_ns: int = None, -# interleaving_cliffords=[None], -# program_name: str = "randomized_benchmarking", -# cal_points: bool = True, -# f_state_cal_pts: bool = True, -# recompile: bool = True, -# ): - -# assert len(two_qubit_net_cliffords) == len(single_qubit_net_cliffords) - -# 
two_qubit_map = {f'q{i}' : qb for i, qb in enumerate(two_qubit_pair)} -# if single_qubits != None: -# single_qubit_map = {f'q{i}' : qb for i, qb in enumerate(single_qubits)} - -# p = oqh.create_program(program_name, platf_cfg) - -# this_file = inspect.getfile(inspect.currentframe()) - -# # Ensure that programs are recompiled when changing the code as well -# recompile_dict = oqh.check_recompilation_needed_hash_based( -# program_fn=p.filename, -# platf_cfg=platf_cfg, -# clifford_rb_oql=this_file, -# recompile=recompile, -# ) - -# if not recompile_dict["recompile"]: -# os.rename(recompile_dict["tmp_file"], recompile_dict["file"]) -# return p - -# if 100_000 in interleaving_cliffords and flux_allocated_duration_ns is None: -# # Try to get the flux duration from the cfg file -# with open(platf_cfg) as json_file: -# loaded_json = json.load(json_file) -# try: -# flux_allocated_duration_ns = loaded_json["instructions"]["sf_cz_se q0"][ -# "duration" -# ] -# except KeyError: -# raise ValueError("Could not find flux duration. 
Specify manually!") - -# for seed in range(nr_seeds): -# for j, n_cl in enumerate(nr_cliffords): -# for interleaving_cl in interleaving_cliffords: - -# # Generate 2-qubit sequence -# for net_clifford_2q, net_clifford_1q in zip(two_qubit_net_cliffords, single_qubit_net_cliffords): -# two_cl_seq = rb.randomized_benchmarking_sequence( -# n_cl, -# number_of_qubits=2, -# desired_net_cl=net_clifford_2q, -# max_clifford_idx=max_clifford_idx, -# interleaving_cl=interleaving_cl, -# ) -# net_two_cl_seq = rb.calculate_net_clifford(two_cl_seq, TwoQubitClifford) -# # decompose -# two_cl_seq_decomposed = [] -# for cl in two_cl_seq: -# # benchmarking only CZ (not as a member of CNOT group) -# if cl == 104368: # 104368 = 100_000 + CZ -# two_cl_seq_decomposed.append([("CZ", ["q0", "q1"])]) -# # benchmarking only idling identity, with duration of cz -# # see below where wait-time is added -# elif cl == 100_000: -# two_cl_seq_decomposed.append([("I", ["q0", "q1"])]) -# else: -# two_cl_seq_decomposed.append(TwoQubitClifford(cl).gate_decomposition) - -# # Generate single-qubit sequence -# if single_qubits != None: -# Single_cl_seq = {} -# net_Single_cl_seq = {} -# Single_cl_seq_decomposed = dict.fromkeys(single_qubits) -# for single_qubit in single_qubits: -# Single_cl_seq[single_qubit] = rb.randomized_benchmarking_sequence( -# n_cl, -# number_of_qubits=1, -# desired_net_cl=net_clifford_1q, -# max_clifford_idx=max_clifford_idx, -# ) -# net_Single_cl_seq[single_qubit] = rb.calculate_net_clifford(Single_cl_seq[single_qubit], SingleQubitClifford) -# Single_cl_seq_decomposed[single_qubit] = [] -# for cl in Single_cl_seq[single_qubit]: -# Single_cl_seq_decomposed[single_qubit].append(SingleQubitClifford(cl).gate_decomposition) - - -# # # generate OpenQL kernel for every net_clifford -# # for net_clifford in net_cliffords: -# # create decomposed sequence including recovery -# two_recovery_to_idx_clifford = net_two_cl_seq.get_inverse() -# two_recovery_clifford = 
TwoQubitClifford(net_clifford_2q) * two_recovery_to_idx_clifford -# two_cl_seq_decomposed_with_net = two_cl_seq_decomposed + [ -# two_recovery_clifford.gate_decomposition -# ] -# if single_qubits != None: -# for single_qubit in single_qubits: -# single_recovery_to_idx_clifford = net_Single_cl_seq[single_qubit].get_inverse() -# single_recovery_clifford = SingleQubitClifford(net_clifford_1q) * single_recovery_to_idx_clifford -# single_cl_seq_decomposed_with_net = Single_cl_seq_decomposed[single_qubit] + [ -# single_recovery_clifford.gate_decomposition -# ] - -# k = oqh.create_kernel( -# "RB_{}Cl_s{}_net{}_inter{}".format( -# int(n_cl), seed, net_clifford_2q, interleaving_cl -# ), -# p, -# ) -# for qubit_idx in two_qubit_map.values(): -# k.prepz(qubit_idx) -# if single_qubits != None: -# for qubit_idx in single_qubit_map.values(): -# k.prepz(qubit_idx) - -# print(two_cl_seq_decomposed_with_net) -# if single_qubits != None: -# print(single_cl_seq_decomposed_with_net) -# # print(len(two_cl_seq_decomposed_with_net), len(single_cl_seq_decomposed_with_net)) - -# for i, gates in enumerate(two_cl_seq_decomposed_with_net): - -# if i%2 == 0 and single_qubit != None: -# for g1, q1 in single_cl_seq_decomposed_with_net[i//2]: -# k.gate(g1, [single_qubit_map[q1]]) - -# for g, q in gates: -# if isinstance(q, str): # single qubit gate -# k.gate(g, [two_qubit_map[q]]) -# elif isinstance(q, list): # 2 qubit gate -# if g == "I": -# # interleaving an idling with the length of the CZ -# k.gate("wait", [], 0) # alignment -# k.gate("wait", [], flux_allocated_duration_ns) -# k.gate("wait", [], 0) -# else: -# k.gate("wait", [], 0) -# k.gate( -# flux_codeword, list(two_qubit_map.values()) -# ) # fix for QCC -# k.gate("wait", [], 0) -# # Measurement -# k.gate("wait", [], 0) -# for qubit_idx in two_qubit_map.values(): -# k.measure(qubit_idx) -# k.gate("wait", [], 0) -# p.add_kernel(k) - -# if cal_points: -# if f_state_cal_pts: -# combinations = ["00", "01", "10", "11", "02", "20", "22"] -# 
else: -# combinations = ["00", "01", "10", "11"] -# p = oqh.add_multi_q_cal_points( -# p, qubits=two_qubit_pair, combinations=combinations -# ) - -# p = oqh.compile(p) -# # Just before returning we rename the hashes file as an indication of the -# # integrity of the RB code -# os.rename(recompile_dict["tmp_file"], recompile_dict["file"]) -# return p - - def two_qubit_randomized_benchmarking( two_qubit_pair: list, single_qubits: list, @@ -756,17 +583,17 @@ def two_qubit_randomized_benchmarking( Single_cl_seq = {} net_Single_cl_seq = {} Single_cl_seq_decomposed = dict.fromkeys(single_qubits) - for i, sq in enumerate(single_qubits): - Single_cl_seq[sq] = rb.randomized_benchmarking_sequence( + for single_qubit in single_qubits: + Single_cl_seq[single_qubit] = rb.randomized_benchmarking_sequence( n_cl, number_of_qubits=1, - desired_net_cl=0, + desired_net_cl=net_clifford_1q, max_clifford_idx=max_clifford_idx, ) - net_Single_cl_seq[sq] = rb.calculate_net_clifford(Single_cl_seq[sq], SingleQubitClifford) - Single_cl_seq_decomposed[sq] = [] - for cl in Single_cl_seq[sq]: - Single_cl_seq_decomposed[sq].append(SingleQubitClifford(cl, i=i).gate_decomposition) + net_Single_cl_seq[single_qubit] = rb.calculate_net_clifford(Single_cl_seq[single_qubit], SingleQubitClifford) + Single_cl_seq_decomposed[single_qubit] = [] + for cl in Single_cl_seq[single_qubit]: + Single_cl_seq_decomposed[single_qubit].append(SingleQubitClifford(cl).gate_decomposition) # # generate OpenQL kernel for every net_clifford @@ -777,15 +604,13 @@ def two_qubit_randomized_benchmarking( two_cl_seq_decomposed_with_net = two_cl_seq_decomposed + [ two_recovery_clifford.gate_decomposition ] - # Jorge 6-4-2022: Fixme, recovery clifford for simultaneous - # single qubit RB of spectators is not working. 
- # if single_qubits != None: - # for sq in single_qubits: - # single_recovery_to_idx_clifford = net_Single_cl_seq[sq].get_inverse() - # single_recovery_clifford = SingleQubitClifford(net_clifford_1q) * single_recovery_to_idx_clifford - # single_cl_seq_decomposed_with_net = Single_cl_seq_decomposed[sq] + [ - # single_recovery_clifford.gate_decomposition - # ] + if single_qubits != None: + for single_qubit in single_qubits: + single_recovery_to_idx_clifford = net_Single_cl_seq[single_qubit].get_inverse() + single_recovery_clifford = SingleQubitClifford(net_clifford_1q) * single_recovery_to_idx_clifford + single_cl_seq_decomposed_with_net = Single_cl_seq_decomposed[single_qubit] + [ + single_recovery_clifford.gate_decomposition + ] k = p.create_kernel( "RB_{}Cl_s{}_net{}_inter{}".format( @@ -798,17 +623,13 @@ def two_qubit_randomized_benchmarking( for qubit_idx in single_qubit_map.values(): k.prepz(qubit_idx) - # print(two_cl_seq_decomposed_with_net) - # if single_qubits != None: - # print(single_cl_seq_decomposed_with_net) # print(len(two_cl_seq_decomposed_with_net), len(single_cl_seq_decomposed_with_net)) for i, gates in enumerate(two_cl_seq_decomposed_with_net): - if i%2 == 0 and single_qubits != None: - for sq in single_qubits: - for g1, q1 in Single_cl_seq_decomposed[sq][i//2]: - k.gate(g1, [single_qubit_map[q1]]) + if i%2 == 0 and single_qubit != None: + for g1, q1 in single_cl_seq_decomposed_with_net[i//2]: + k.gate(g1, [single_qubit_map[q1]]) for g, q in gates: if isinstance(q, str): # single qubit gate @@ -838,7 +659,7 @@ def two_qubit_randomized_benchmarking( combinations = ["00", "01", "10", "11", "02", "20", "22"] else: combinations = ["00", "01", "10", "11"] - + p.add_multi_q_cal_points( qubits=two_qubit_pair, combinations=combinations ) diff --git a/pycqed/measurement/openql_experiments/config_cc_s17_direct_iq.json.in b/pycqed/measurement/openql_experiments/config_cc_s17_direct_iq.json.in index 141e8efb51..2cdadb4f86 100644 --- 
a/pycqed/measurement/openql_experiments/config_cc_s17_direct_iq.json.in +++ b/pycqed/measurement/openql_experiments/config_cc_s17_direct_iq.json.in @@ -5,7 +5,7 @@ "eqasm_compiler" : "eqasm_backend_cc", "hardware_settings": { - "qubit_number": 17, + "qubit_number": 26, "cycle_time" : 20, // in [ns] "eqasm_backend_cc": { @@ -145,7 +145,7 @@ }, { "name": "ro_1", - "qubits": [[0], [1], [2], [3], [7], [8], [12], [13], [15]], + "qubits": [[0], [1], [2], [3], [7], [], [], [], []], "signal_type": "measure", "ref_instrument_definition": "zi-uhfqa", "ref_control_mode": "uhfqa-9ch", @@ -167,13 +167,25 @@ "io_module": "CC-CONN-DIO" } }, + { + "name": "ro_3", + "qubits": [[8], [12], [13], [15], [], [], [], [], []], + "signal_type": "measure", + "ref_instrument_definition": "zi-uhfqa", + "ref_control_mode": "uhfqa-9ch", + "controller": { + "name": "cc", // FIXME + "slot": 11, + "io_module": "CC-CONN-DIO" + } + }, // microwave. { "name": "mw_0", "qubits": [ // data qubits: - [9], [1], + [9], [5], [0] ], @@ -189,10 +201,10 @@ { "name": "mw_1", "qubits": [ // ancilla qubits: - [2], - [14], - [], - [] + [18], // Added LRU experiment! + [8], + [15], + [17] // Added LRU experiment! 
], "signal_type": "mw", "ref_instrument_definition": "zi-hdawg", @@ -224,7 +236,7 @@ "name": "mw_3", "qubits": [ // ancilla qubits: [10], - [15], + [], [13], [16] ], @@ -240,10 +252,10 @@ { "name": "mw_4", "qubits": [ // ancilla qubits: - [], + [2], [6], [7], - [8] + [14] ], "signal_type": "mw", "ref_instrument_definition": "zi-hdawg", @@ -283,7 +295,7 @@ }, { "name": "flux_2", - "qubits": [[11], [], [], [], [], [], [], []], + "qubits": [[], [11], [22], [24], [20], [21], [25], [23]], "signal_type": "flux", "ref_instrument_definition": "zi-hdawg", "ref_control_mode": "awg8-flux", @@ -342,8 +354,59 @@ "cl_23 %0": ["rx90 %0", "ry90 %0", "rxm90 %0"], // CZ gates - "measure %0": ["rx12 %0", "measure %0"], + // "measure %0": ["rx12 %0", "measure %0"], + // LRU pulses + // "lru q16" : ["barrier q19, q16", "cw_lru q19", "update_ph_LRU q16", "barrier q19, q16"], // D6 + // "lru q13" : ["barrier q17, q13", "cw_lru q17", "update_ph_LRU q13", "barrier q17, q13"], // D5 + // "lru q15" : ["barrier q18, q15", "cw_lru q18", "update_ph_LRU q15", "barrier q18, q15"], // D4 + // "lru q3" : ["barrier q20, q3" , "sf_lru q20", "update_ph_LRU q3" , "barrier q20, q3" ], // X3 + // "lru q12" : ["barrier q21, q12", "sf_lru q21", "update_ph_LRU q12", "barrier q21, q12"], // Z3 + // "lru q10" : ["barrier q24, q10", "sf_lru q24", "update_ph_LRU q10", "barrier q24, q10"], // Z4 + // "lru q9" : ["barrier q25, q9" , "sf_lru q25", "update_ph_LRU q9" , "barrier q25, q9" ], // X4 + // "lru q7" : ["barrier q22, q7" , "sf_lru q22", "update_ph_LRU q7" , "barrier q22, q7" ], // Z1 + // "lru q14" : ["barrier q23, q14", "sf_lru q23", "update_ph_LRU q14", "barrier q23, q14"], // Z2 + "lru q15": ["cw_lru q18"], // D4 + "lru q13": ["cw_lru q17"], // D5 + "lru q16": ["cw_lru q19"], // D6 + "lru q1" : ["cw_lru q19"], // D7 (split from D6) + "lru q2" : ["cw_lru q19"], // D2 (split from D6) + // "lru q6" : ["barrier q18, q6" ,"cw_lru q18", "update_ph_LRU q6", "barrier q18, q6"], // D1 (split from D4) + "lru q6" 
: ["cw_lru q18"], // D1 (split from D4) + "lru q0" : ["cw_lru q18"], // D3 (split from D4) + "lru q4" : ["cw_lru q17"], // D9 (split from D5) + "lru q5" : ["cw_lru q17"], // D8 (split from D5) + "lru q3" : ["sf_lru q20"], // X3 + "lru q12": ["sf_lru q21"], // Z3 + "lru q10": ["sf_lru q24"], // Z4 + "lru q9" : ["sf_lru q25"], // X4 + "lru q7" : ["sf_lru q22"], // Z1 + "lru q14": ["sf_lru q23"], // Z2 + "lru q11": ["sf_lru q25"], // X1 (split from X4) + "lru q8" : ["sf_lru q22"], // X2 (split from Z1) + // "lru q15": ["barrier q15, q18", "cw_lru q18", "update_ph_LRU q15","barrier q15, q18"], // D4 + // "lru q13": ["barrier q13, q17", "cw_lru q17", "update_ph_LRU q13","barrier q13, q17"], // D5 + // "lru q16": ["barrier q16, q19", "cw_lru q19", "update_ph_LRU q16","barrier q16, q19"], // D6 + // "lru q1": ["barrier q1, q19", "cw_lru q19", "update_ph_LRU q1","barrier q1, q19"], // D7 (split from D6) + // "lru q2": ["barrier q2, q19", "cw_lru q19", "update_ph_LRU q2","barrier q2, q19"], // D2 (split from D6) + // "lru q6": ["barrier q6, q18", "cw_lru q18", "update_ph_LRU q6","barrier q6, q18"], // D1 (split from D4) + // "lru q0": ["barrier q0, q18", "cw_lru q18", "update_ph_LRU q0","barrier q0, q18"], // D3 (split from D4) + // "lru q4": ["barrier q4, q17", "cw_lru q17", "update_ph_LRU q4","barrier q4, q17"], // D9 (split from D5) + // "lru q5": ["barrier q5, q17", "cw_lru q17", "update_ph_LRU q5","barrier q5, q17"], // D8 (split from D5) + // "lru q3" : ["barrier q3, q20", "sf_lru q20", "update_ph_LRU q3","barrier q3, q20"], // X3 + // "lru q12": ["barrier q12, q21", "sf_lru q21", "update_ph_LRU q12","barrier q12, q21"], // Z3 + // "lru q10": ["barrier q10, q24", "sf_lru q24", "update_ph_LRU q10","barrier q10, q24"], // Z4 + // "lru q9" : ["barrier q9, q25", "sf_lru q25", "update_ph_LRU q9","barrier q9, q25"], // X4 + // "lru q7" : ["barrier q7, q22", "sf_lru q22", "update_ph_LRU q7","barrier q7, q22"], // Z1 + // "lru q14": ["barrier q14, q23", "sf_lru q23", 
"update_ph_LRU q14","barrier q14, q23"], // Z2 + // "lru q11": ["barrier q11, q25", "sf_lru q25", "update_ph_LRU q11","barrier q11, q25"], // X1 (split from X4) + // "lru q8" : ["barrier q8, q22", "sf_lru q22", "update_ph_LRU q8","barrier q8, q22"], // X2 (split from Z1) + // "lru q1" : ["barrier q1, q22", "sf_lru q22", "update_ph_LRU q1", "barrier q1, q22"], + // "lru q15": ["barrier q18", "cw_40 q18", "barrier q18"], + // "lru q12": ["barrier q21, q23", "sf_lru q21", "sf_lru q23", "barrier q21, q23"], // Z3 + // "lru q1" : ["barrier q17", "cw_40 q17", "barrier q17"], + + // Updata by Hany [2021-06-01] // Individual CZ gates in Surface-17 // Decomposition of two qubit flux interactions as single-qubit flux @@ -358,6 +421,10 @@ // "cz q5, q9": ["barrier q9, q5, q4", "sf_cz_ne q5", "sf_cz_sw q9","sf_park q4", "barrier q9, q5, q4"], "cz q9, q5": ["barrier q9, q5, q4", "sf_cz_ne q5", "sf_cz_sw q9","sf_park q4", "barrier q9, q5, q4", "update_ph_ne q5", "update_ph_sw q9", "barrier q9, q5, q4"], "cz q5, q9": ["barrier q9, q5, q4", "sf_cz_ne q5", "sf_cz_sw q9","sf_park q4", "barrier q9, q5, q4", "update_ph_ne q5", "update_ph_sw q9", "barrier q9, q5, q4"], + // "cz q9, q5": ["barrier q9, q5, q4", "sf_cz_ne q5", "sf_square q9","sf_park q4", "barrier q9, q5, q4", "update_ph_ne q5", "update_ph_sw q9", "barrier q9, q5, q4"], + // "cz q5, q9": ["barrier q9, q5, q4", "sf_cz_ne q5", "sf_square q9","sf_park q4", "barrier q9, q5, q4", "update_ph_ne q5", "update_ph_sw q9", "barrier q9, q5, q4"], + // "cz q9, q5": ["barrier q9, q5, q4", "sf_cz_ne q5", "sf_square q9", "barrier q9, q5, q4", "update_ph_ne q5", "update_ph_sw q9", "barrier q9, q5, q4"], + // "cz q5, q9": ["barrier q9, q5, q4", "sf_cz_ne q5", "sf_square q9", "barrier q9, q5, q4", "update_ph_ne q5", "update_ph_sw q9", "barrier q9, q5, q4"], // Edge 1/25 // "cz q9, q4": ["barrier q9, q4, q5", "sf_cz_nw q4", "sf_cz_se q9","sf_park q5", "barrier q9, q4, q5"], // "cz q4, q9": ["barrier q9, q4, q5", "sf_cz_nw q4", "sf_cz_se 
q9","sf_park q5", "barrier q9, q4, q5"], @@ -366,8 +433,8 @@ // Edge 5/29 // "cz q5, q10": ["barrier q5, q10, q4", "sf_cz_nw q10", "sf_cz_se q5","sf_park q4", "barrier q5, q10, q4"], // "cz q10, q5": ["barrier q5, q10, q4", "sf_cz_nw q10", "sf_cz_se q5","sf_cz_sw q4", "barrier q5, q10, q4"], - "cz q5, q10": ["barrier q5, q10, q4", "sf_cz_nw q10", "sf_cz_se q5","sf_park q4", "barrier q5, q10, q4", "update_ph_nw q10", "update_ph_se q5", "barrier q5, q10, q4"], - "cz q10, q5": ["barrier q5, q10, q4", "sf_cz_nw q10", "sf_cz_se q5","sf_cz_sw q4", "barrier q5, q10, q4", "update_ph_nw q10", "update_ph_se q5", "barrier q5, q10, q4"], + "cz q5, q10": ["barrier q5, q10, q4", "sf_cz_nw q10", "sf_cz_se q5", "sf_park q4", "barrier q5, q10, q4", "update_ph_nw q10", "update_ph_se q5", "barrier q5, q10, q4"], + "cz q10, q5": ["barrier q5, q10, q4", "sf_cz_nw q10", "sf_cz_se q5", "sf_park q4", "barrier q5, q10, q4", "update_ph_nw q10", "update_ph_se q5", "barrier q5, q10, q4"], // Edge 6/30 // "cz q4, q10": ["barrier q4, q10, q5", "sf_cz_ne q10", "sf_cz_sw q4","sf_park q5", "barrier q4, q10, q5"], // "cz q10, q4": ["barrier q4, q10, q5", "sf_cz_ne q10", "sf_cz_sw q4","sf_park q5", "barrier q4, q10, q5"], @@ -383,16 +450,20 @@ // "cz q3, q1": ["barrier q1, q3, q5", "sf_cz_nw q3", "sf_cz_se q1","sf_park q5", "barrier q1, q3, q5"], "cz q1, q3": ["barrier q1, q3, q5", "sf_cz_nw q3", "sf_cz_se q1","sf_park q5", "barrier q1, q3, q5", "update_ph_nw q3", "update_ph_se q1", "barrier q1, q3, q5"], "cz q3, q1": ["barrier q1, q3, q5", "sf_cz_nw q3", "sf_cz_se q1","sf_park q5", "barrier q1, q3, q5", "update_ph_nw q3", "update_ph_se q1", "barrier q1, q3, q5"], + // "cz q1, q3": ["barrier q1, q3, q5", "sf_square q3", "sf_cz_se q1","sf_park q5", "barrier q1, q3, q5", "update_ph_nw q3", "update_ph_se q1", "barrier q1, q3, q5"], + // "cz q3, q1": ["barrier q1, q3, q5", "sf_square q3", "sf_cz_se q1","sf_park q5", "barrier q1, q3, q5", "update_ph_nw q3", "update_ph_se q1", "barrier q1, q3, q5"], // 
Edge 4/28 // "cz q3, q5": ["barrier q3, q5, q1", "sf_cz_ne q3", "sf_cz_sw q5","sf_park q1", "barrier q3, q5, q1"], // "cz q5, q3": ["barrier q3, q5, q1", "sf_cz_ne q5", "sf_cz_sw q3","sf_park q1", "barrier q3, q5, q1"], "cz q3, q5": ["barrier q3, q5, q1", "sf_cz_ne q3", "sf_cz_sw q5","sf_park q1", "barrier q3, q5, q1", "update_ph_ne q3", "update_ph_sw q5", "barrier q3, q5, q1"], - "cz q5, q3": ["barrier q3, q5, q1", "sf_cz_ne q5", "sf_cz_sw q3","sf_park q1", "barrier q3, q5, q1", "update_ph_ne q5", "update_ph_sw q3", "barrier q3, q5, q1"], + "cz q5, q3": ["barrier q3, q5, q1", "sf_cz_ne q3", "sf_cz_sw q5","sf_park q1", "barrier q3, q5, q1", "update_ph_ne q3", "update_ph_sw q5", "barrier q3, q5, q1"], // Edge 7/31 // "cz q12, q15": ["barrier q12, q15, q3, q7", "sf_cz_nw q15", "sf_cz_se q12","sf_park q3","sf_park q7", "barrier q12, q15, q3, q7"], // "cz q15, q12": ["barrier q12, q15, q3, q7", "sf_cz_nw q15", "sf_cz_se q12","sf_park q3","sf_park q7", "barrier q12, q15, q3, q7"], "cz q12, q15": ["barrier q12, q15, q3, q7", "sf_cz_nw q15", "sf_cz_se q12","sf_park q3","sf_park q7", "barrier q12, q15, q3, q7", "update_ph_nw q15", "update_ph_se q12", "barrier q12, q15, q3, q7"], "cz q15, q12": ["barrier q12, q15, q3, q7", "sf_cz_nw q15", "sf_cz_se q12","sf_park q3","sf_park q7", "barrier q12, q15, q3, q7", "update_ph_nw q15", "update_ph_se q12", "barrier q12, q15, q3, q7"], + // "cz q12, q15": ["barrier q12, q15, q3, q7", "sf_cz_nw q15", "sf_cz_se q12","sf_park q7", "barrier q12, q15, q3, q7", "update_ph_nw q15", "update_ph_se q12", "barrier q12, q15, q3, q7"], + // "cz q15, q12": ["barrier q12, q15, q3, q7", "sf_cz_nw q15", "sf_cz_se q12","sf_park q7", "barrier q12, q15, q3, q7", "update_ph_nw q15", "update_ph_se q12", "barrier q12, q15, q3, q7"], // Edge 8/32 // "cz q3, q15": ["barrier q3, q15, q7, q12", "sf_cz_ne q15", "sf_cz_sw q3","sf_park q7","sf_park q12", "barrier q3, q15, q7, q12"], // "cz q15, q3": ["barrier q3, q15, q7, q12", "sf_cz_ne q15", "sf_cz_sw 
q3","sf_park q7","sf_park q12", "barrier q3, q15, q7, q12"], @@ -403,31 +474,43 @@ // "cz q13, q3": ["barrier q3, q13, q7, q8, q10", "sf_cz_nw q13", "sf_cz_se q3","sf_park q7","sf_park q8","sf_park q10", "barrier q3, q13, q7, q8, q10"], "cz q3, q13": ["barrier q3, q13, q7, q8, q10", "sf_cz_nw q13", "sf_cz_se q3","sf_park q7","sf_park q8","sf_park q10", "barrier q3, q13, q7, q8, q10", "update_ph_nw q13", "update_ph_se q3", "barrier q3, q13, q7, q8, q10"], "cz q13, q3": ["barrier q3, q13, q7, q8, q10", "sf_cz_nw q13", "sf_cz_se q3","sf_park q7","sf_park q8","sf_park q10", "barrier q3, q13, q7, q8, q10", "update_ph_nw q13", "update_ph_se q3", "barrier q3, q13, q7, q8, q10"], + // "cz q3, q13": ["barrier q3, q13, q7, q8, q10", "sf_cz_nw q13", "sf_cz_se q3","sf_park q7","sf_park q10", "barrier q3, q13, q7, q8, q10", "update_ph_nw q13", "update_ph_se q3", "barrier q3, q13, q7, q8, q10"], + // "cz q13, q3": ["barrier q3, q13, q7, q8, q10", "sf_cz_nw q13", "sf_cz_se q3","sf_park q7","sf_park q10", "barrier q3, q13, q7, q8, q10", "update_ph_nw q13", "update_ph_se q3", "barrier q3, q13, q7, q8, q10"], // Edge 10/34 // "cz q10, q13": ["barrier q10, q13, q3, q7, q8", "sf_cz_ne q13", "sf_cz_sw q10","sf_park q3","sf_park q7","sf_park q8", "barrier q10, q13, q3, q7, q8"], // "cz q13, q10": ["barrier q10, q13, q3, q7, q8", "sf_cz_ne q13", "sf_cz_sw q10","sf_park q3","sf_park q7","sf_park q8", "barrier q10, q13, q3, q7, q8"], "cz q10, q13": ["barrier q10, q13, q3, q7, q8", "sf_cz_ne q13", "sf_cz_sw q10","sf_park q3","sf_park q7","sf_park q8", "barrier q10, q13, q3, q7, q8", "update_ph_ne q13", "update_ph_sw q10", "barrier q10, q13, q3, q7, q8"], "cz q13, q10": ["barrier q10, q13, q3, q7, q8", "sf_cz_ne q13", "sf_cz_sw q10","sf_park q3","sf_park q7","sf_park q8", "barrier q10, q13, q3, q7, q8", "update_ph_ne q13", "update_ph_sw q10", "barrier q10, q13, q3, q7, q8"], + // "cz q10, q13": ["barrier q10, q13, q3, q7, q8", "sf_cz_ne q13", "sf_cz_sw q10","sf_park q7", "barrier q10, q13, 
q3, q7, q8", "update_ph_ne q13", "update_ph_sw q10", "barrier q10, q13, q3, q7, q8"], + // "cz q13, q10": ["barrier q10, q13, q3, q7, q8", "sf_cz_ne q13", "sf_cz_sw q10","sf_park q7", "barrier q10, q13, q3, q7, q8", "update_ph_ne q13", "update_ph_sw q10", "barrier q10, q13, q3, q7, q8"], // Edge 11/35 // "cz q10, q16": ["barrier q10, q16, q8, q14", "sf_cz_nw q16", "sf_cz_se q10","sf_park q8","sf_park q14", "barrier q10, q16, q8, q14"], // "cz q16, q10": ["barrier q10, q16, q8, q14", "sf_cz_nw q16", "sf_cz_se q10","sf_park q8","sf_park q14", "barrier q10, q16, q8, q14"], "cz q10, q16": ["barrier q10, q16, q8, q14", "sf_cz_nw q16", "sf_cz_se q10","sf_park q8","sf_park q14", "barrier q10, q16, q8, q14", "update_ph_nw q16", "update_ph_se q10", "barrier q10, q16, q8, q14"], "cz q16, q10": ["barrier q10, q16, q8, q14", "sf_cz_nw q16", "sf_cz_se q10","sf_park q8","sf_park q14", "barrier q10, q16, q8, q14", "update_ph_nw q16", "update_ph_se q10", "barrier q10, q16, q8, q14"], + // "cz q10, q16": ["barrier q10, q16, q8, q14", "sf_cz_nw q16", "sf_cz_se q10","sf_park q14", "barrier q10, q16, q8, q14", "update_ph_nw q16", "update_ph_se q10", "barrier q10, q16, q8, q14"], + // "cz q16, q10": ["barrier q10, q16, q8, q14", "sf_cz_nw q16", "sf_cz_se q10","sf_park q14","sf_park q0", "barrier q10, q16, q8, q14", "update_ph_nw q16", "update_ph_se q10", "barrier q10, q16, q8, q14"], // Edge 12/36 // "cz q15, q7": ["barrier q15, q7, q3, q12", "sf_cz_nw q7", "sf_cz_se q15","sf_park q3","sf_park q12", "barrier q15, q7, q3, q12"], // "cz q7, q15": ["barrier q15, q7, q3, q12", "sf_cz_nw q7", "sf_cz_se q15","sf_park q3","sf_park q12", "barrier q15, q7, q3, q12"], "cz q15, q7": ["barrier q15, q7, q3, q12", "sf_cz_nw q7", "sf_cz_se q15","sf_park q3","sf_park q12", "barrier q15, q7, q3, q12", "update_ph_nw q7", "update_ph_se q15", "barrier q15, q7, q3, q12"], "cz q7, q15": ["barrier q15, q7, q3, q12", "sf_cz_nw q7", "sf_cz_se q15","sf_park q3","sf_park q12", "barrier q15, q7, q3, q12", 
"update_ph_nw q7", "update_ph_se q15", "barrier q15, q7, q3, q12"], + // "cz q15, q7": ["barrier q15, q7, q3, q12", "sf_cz_nw q7", "sf_cz_se q15","sf_park q12", "barrier q15, q7, q3, q12", "update_ph_nw q7", "update_ph_se q15", "barrier q15, q7, q3, q12"], + // "cz q7, q15": ["barrier q15, q7, q3, q12", "sf_cz_nw q7", "sf_cz_se q15","sf_park q12", "sf_park q1", "barrier q15, q7, q3, q12", "update_ph_nw q7", "update_ph_se q15", "barrier q15, q7, q3, q12"], // Edge 13/37 // "cz q13, q7": ["barrier q13, q7, q3, q8, q10", "sf_cz_ne q7", "sf_cz_sw q13","sf_park q3","sf_park q8","sf_park q10", "barrier q13, q7, q3, q8, q10"], // "cz q7, q13": ["barrier q13, q7, q3, q8, q10", "sf_cz_ne q7", "sf_cz_sw q13","sf_park q3","sf_park q8","sf_park q10", "barrier q13, q7, q3, q8, q10"], "cz q13, q7": ["barrier q13, q7, q3, q8, q10", "sf_cz_ne q7", "sf_cz_sw q13","sf_park q3","sf_park q8","sf_park q10", "barrier q13, q7, q3, q8, q10", "update_ph_ne q7", "update_ph_sw q13", "barrier q13, q7, q3, q8, q10"], "cz q7, q13": ["barrier q13, q7, q3, q8, q10", "sf_cz_ne q7", "sf_cz_sw q13","sf_park q3","sf_park q8","sf_park q10", "barrier q13, q7, q3, q8, q10", "update_ph_ne q7", "update_ph_sw q13", "barrier q13, q7, q3, q8, q10"], + // "cz q13, q7": ["barrier q13, q7, q3, q8, q10", "sf_cz_ne q7", "sf_cz_sw q13","sf_park q10", "barrier q13, q7, q3, q8, q10", "update_ph_ne q7", "update_ph_sw q13", "barrier q13, q7, q3, q8, q10"], + // "cz q7, q13": ["barrier q13, q7, q3, q8, q10", "sf_cz_ne q7", "sf_cz_sw q13","sf_park q10", "barrier q13, q7, q3, q8, q10", "update_ph_ne q7", "update_ph_sw q13", "barrier q13, q7, q3, q8, q10"], // // Edge 14/38 // "cz q13, q8": ["barrier q13, q8, q3, q7, q10", "sf_cz_nw q8", "sf_cz_se q13","sf_park q3","sf_park q7","sf_park q10", "barrier q13, q8, q3, q7, q10"], // "cz q8, q13": ["barrier q13, q8, q3, q7, q10", "sf_cz_nw q8", "sf_cz_se q13","sf_park q3","sf_park q7","sf_park q10", "barrier q13, q8, q3, q7, q10"], "cz q13, q8": ["barrier q13, q8, q3, q7, q10", 
"sf_cz_nw q8", "sf_cz_se q13","sf_park q3","sf_park q7","sf_park q10", "barrier q13, q8, q3, q7, q10", "update_ph_nw q8", "update_ph_se q13", "barrier q13, q8, q3, q7, q10"], "cz q8, q13": ["barrier q13, q8, q3, q7, q10", "sf_cz_nw q8", "sf_cz_se q13","sf_park q3","sf_park q7","sf_park q10", "barrier q13, q8, q3, q7, q10", "update_ph_nw q8", "update_ph_se q13", "barrier q13, q8, q3, q7, q10"], + // "cz q13, q8": ["barrier q13, q8, q3, q7, q10", "sf_cz_nw q8", "sf_cz_se q13","sf_park q7","sf_park q10", "barrier q13, q8, q3, q7, q10", "update_ph_nw q8", "update_ph_se q13", "barrier q13, q8, q3, q7, q10"], + // "cz q8, q13": ["barrier q13, q8, q3, q7, q10", "sf_cz_nw q8", "sf_cz_se q13","sf_park q7","sf_park q10", "barrier q13, q8, q3, q7, q10", "update_ph_nw q8", "update_ph_se q13", "barrier q13, q8, q3, q7, q10"], // Edge 15/39 // "cz q16, q8": ["barrier q16, q8, q10, q14", "sf_cz_ne q8", "sf_cz_sw q16","sf_park q10","sf_park q14", "barrier q16, q8, q10, q14"], // "cz q8, q16": ["barrier q16, q8, q10, q14", "sf_cz_ne q8", "sf_cz_sw q16","sf_park q10","sf_park q14", "barrier q16, q8, q10, q14"], @@ -438,11 +521,24 @@ // "cz q14, q16": ["barrier q14, q16, q8, q10", "sf_cz_nw q14", "sf_cz_se q16","sf_park q8","sf_park q10", "barrier q14, q16, q8, q10"], "cz q16, q14": ["barrier q14, q16, q8, q10", "sf_cz_nw q14", "sf_cz_se q16","sf_park q8","sf_park q10", "barrier q14, q16, q8, q10", "update_ph_nw q14", "update_ph_se q16", "barrier q14, q16, q8, q10"], "cz q14, q16": ["barrier q14, q16, q8, q10", "sf_cz_nw q14", "sf_cz_se q16","sf_park q8","sf_park q10", "barrier q14, q16, q8, q10", "update_ph_nw q14", "update_ph_se q16", "barrier q14, q16, q8, q10"], + // "cz q16, q14": ["barrier q14, q16, q8, q10", "sf_cz_nw q14", "sf_cz_se q16","sf_park q10", "barrier q14, q16, q8, q10", "update_ph_nw q14", "update_ph_se q16", "barrier q14, q16, q8, q10"], + // "cz q14, q16": ["barrier q14, q16, q8, q10", "sf_cz_nw q14", "sf_cz_se q16","sf_park q10", "barrier q14, q16, q8, q10", 
"update_ph_nw q14", "update_ph_se q16", "barrier q14, q16, q8, q10"], // Edge 17/41 // "cz q7, q6": ["barrier q7, q6, q2", "sf_cz_ne q6", "sf_cz_sw q7","sf_park q2", "barrier q7, q6, q2"], // "cz q6, q7": ["barrier q7, q6, q2", "sf_cz_ne q6", "sf_cz_sw q7","sf_park q2", "barrier q7, q6, q2"], "cz q7, q6": ["barrier q7, q6, q2", "sf_cz_ne q6", "sf_cz_sw q7","sf_park q2", "barrier q7, q6, q2", "update_ph_ne q6", "update_ph_sw q7", "barrier q7, q6, q2"], "cz q6, q7": ["barrier q7, q6, q2", "sf_cz_ne q6", "sf_cz_sw q7","sf_park q2", "barrier q7, q6, q2", "update_ph_ne q6", "update_ph_sw q7", "barrier q7, q6, q2"], + // "cz q7, q6": ["barrier q7, q6, q2","rx12 q7", + // "barrier q7, q6, q2","i q7", + // "barrier q7, q6, q2","sf_park q7", + // "barrier q7, q6, q2", "sf_cz_ne q6", "sf_cz_sw q7","sf_park q2", + // "barrier q7, q6, q2","update_ph_ne q6", "update_ph_sw q7", "barrier q7, q6, q2", + // "barrier q7, q6, q2","i q7", + // "barrier q7, q6, q2", "sf_park q7", + // "barrier q7, q6, q2","rx12 q7", "barrier q7, q6, q2" ], + + "cz q6, q7": ["barrier q7, q6, q2", "sf_cz_ne q6", "sf_cz_sw q7","sf_park q2", "barrier q7, q6, q2", "update_ph_ne q6", "update_ph_sw q7", "barrier q7, q6, q2"], + // Edge 18/42 // "cz q7, q2": ["barrier q7, q2, q6", "sf_cz_nw q2", "sf_cz_se q7","sf_park q6", "barrier q7, q2, q6"], // "cz q2, q7": ["barrier q7, q2, q6", "sf_cz_nw q2", "sf_cz_se q7","sf_park q6", "barrier q7, q2, q6"], @@ -555,95 +651,203 @@ // "cz q2, q11": ["barrier q2, q11, q6", "sf_cz_ne q11", "sf_cz_sw q2","sf_park q6", "barrier q2, q11, q6", "update_ph_ne q11", "update_ph_sw q2", "barrier q2, q11, q6"], // "cz q11, q2": ["barrier q2, q11, q6", "sf_cz_ne q11", "sf_cz_sw q2","sf_park q6", "barrier q2, q11, q6", "update_ph_ne q11", "update_ph_sw q2", "barrier q2, q11, q6"], - - // // 2. flux-dance with hard-coded CZ gates in parallel. - // // Qubits are ordered in sf_cz target, control. 
- "flux_dance_1 q0": ["barrier q3, q5, q16, q8, q11, q2, q1, q10, q14, q6", + // Extra auxiliar CZs for surface-17 experiment (Jorge) + "cz_aux q3, q13": ["barrier q3, q13, q7, q8, q10", "sf_cz_aux q13", "sf_park q7","sf_park q8","sf_park q10", "barrier q3, q13, q7, q8, q10", "update_ph_nw q13", "update_ph_se q3", "barrier q3, q13, q7, q8, q10"], + "cz_aux q13, q3": ["barrier q3, q13, q7, q8, q10", "sf_cz_aux q13", "sf_park q7","sf_park q8","sf_park q10", "barrier q3, q13, q7, q8, q10", "update_ph_nw q13", "update_ph_se q3", "barrier q3, q13, q7, q8, q10"], + + "cz_aux q3, q5": ["barrier q3, q5, q1", "sf_cz_aux q3", "sf_park q1", "barrier q3, q5, q1", "update_ph_ne q3", "update_ph_sw q5", "barrier q3, q5, q1"], + "cz_aux q5, q3": ["barrier q3, q5, q1", "sf_cz_aux q3", "sf_park q1", "barrier q3, q5, q1", "update_ph_ne q3", "update_ph_sw q5", "barrier q3, q5, q1"], + + "cz_aux q7, q2": ["barrier q7, q2, q6", "sf_cz_aux q7", "sf_park q6", "barrier q7, q2, q6", "update_ph_nw q2", "update_ph_se q7", "barrier q7, q2, q6"], + "cz_aux q2, q7": ["barrier q7, q2, q6", "sf_cz_aux q7", "sf_park q6", "barrier q7, q2, q6", "update_ph_nw q2", "update_ph_se q7", "barrier q7, q2, q6"], + + // 2. flux-dance with hard-coded CZ gates in parallel. + // Qubits are ordered in sf_cz target, control. 
+ "flux_dance_1 q0": ["barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", "sf_cz_ne q3", "sf_cz_sw q5", "sf_cz_sw q16", "sf_cz_ne q8", "sf_cz_ne q11", "sf_cz_sw q2", "sf_park q1", "sf_park q10", "sf_park q14","sf_park q6", - "barrier q3, q5, q16, q8, q11, q2, q1, q10, q14, q6", - "update_ph_park_1 q3", "update_ph_park_1 q16", "update_ph_park_1 q11", - // "update_ph_park_1 q5", "update_ph_park_1 q8", "update_ph_park_1 q2", - "barrier q3, q5, q16, q8, q11, q2, q1, q10, q14, q6"], + "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", + // "update_ph_step_1 q7", "update_ph_step_1 q14","update_ph_step_1 q12", "update_ph_step_1 q10", + // "update_ph_step_1 q6", "update_ph_step_1 q2","update_ph_step_1 q0", "update_ph_step_1 q15", + // "update_ph_step_1 q13", "update_ph_step_1 q16","update_ph_step_1 q1", "update_ph_step_1 q5", + // "update_ph_step_1 q4", + "i q0", + "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6"], - "flux_dance_2 q0": ["barrier q3, q1, q13, q8, q11, q6, q5, q10, q7, q2", + "flux_dance_2 q0": ["barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", "sf_cz_nw q3", "sf_cz_se q1", "sf_cz_se q13", "sf_cz_nw q8", "sf_cz_nw q11", "sf_cz_se q6", "sf_park q5", "sf_park q10", "sf_park q7","sf_park q2", - "barrier q3, q1, q13, q8, q11, q6, q5, q10, q7, q2", - "update_ph_park_2 q3", "update_ph_park_2 q13", "update_ph_park_2 q11", - // "update_ph_park_2 q1", "update_ph_park_2 q8", "update_ph_park_2 q6", - "barrier q3, q1, q13, q8, q11, q6, q5, q10, q7, q2"], + "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", + // "update_ph_step_2 q7", "update_ph_step_2 q14","update_ph_step_2 q12", "update_ph_step_2 q10", + // "update_ph_step_2 q6", "update_ph_step_2 q2","update_ph_step_2 q0", "update_ph_step_2 q15", + // "update_ph_step_2 q13", "update_ph_step_2 q16","update_ph_step_2 q1", "update_ph_step_2 q5", + // 
"update_ph_step_2 q4", + "i q0", + // "barrier q3, q1, q13, q8, q11, q6, q5, q10, q7, q2, q15, q4, q0, q9, q12, q16, q14", + // "cw_01 q6", "cw_01 q2", "cw_01 q0", + // "cw_01 q15", "cw_01 q13", "cw_01 q16", + // "cw_01 q1", "cw_01 q5", "cw_01 q4", + // "i q0", + "barrier q3, q1, q13, q8, q11, q6, q5, q10, q7, q2, q15, q4, q0, q9, q12, q16, q14"], - "flux_dance_3 q0": ["barrier q9, q4, q13, q3, q8, q0, q5, q10, q7, q2", + "flux_dance_3 q0": ["barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", "sf_cz_se q9", "sf_cz_nw q4", "sf_cz_nw q13", "sf_cz_se q3", "sf_cz_se q8", "sf_cz_nw q0", - "sf_park q5", "sf_park q10", "sf_park q7","sf_park q2", - "barrier q9, q4, q13, q3, q8, q0, q5, q10, q7, q2", - "update_ph_park_3 q9", "update_ph_park_3 q13", "update_ph_park_3 q8", - // "update_ph_park_3 q4", "update_ph_park_3 q3", "update_ph_park_3 q0", - "barrier q9, q4, q13, q3, q8, q0, q5, q10, q7, q2"], + "sf_park q5", "sf_park q10", "sf_park q7","sf_park q2", + "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", + // "update_ph_step_3 q7", "update_ph_step_3 q14","update_ph_step_3 q12", "update_ph_step_3 q10", + // "update_ph_step_3 q6", "update_ph_step_3 q2","update_ph_step_3 q0", "update_ph_step_3 q15", + // "update_ph_step_3 q13", "update_ph_step_3 q16","update_ph_step_3 q1", "update_ph_step_3 q5", + // "update_ph_step_3 q4", + "i q0", + "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6"], - "flux_dance_4 q0": ["barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0", + "flux_dance_4 q0": ["barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", "sf_cz_sw q9", "sf_cz_ne q5", "sf_cz_ne q15", "sf_cz_sw q3", "sf_cz_sw q8", "sf_cz_ne q2", - "sf_park q4", "sf_park q12", "sf_park q7","sf_park q0", - "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0", - "update_ph_park_4 q9", "update_ph_park_4 q15", "update_ph_park_4 q8", - // "update_ph_park_4 q5", 
"update_ph_park_4 q3", "update_ph_park_4 q2", - "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0"], + "sf_park q4", "sf_park q12", "sf_park q7","sf_park q0", + "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", + "update_ph_step_4 q11", "update_ph_step_4 q8", "update_ph_step_4 q3","update_ph_step_4 q9", + // "update_ph_step_4 q7", "update_ph_step_4 q14","update_ph_step_4 q12", "update_ph_step_4 q10", + "update_ph_step_4 q6", "update_ph_step_4 q2", "update_ph_step_4 q0","update_ph_step_4 q15", + "update_ph_step_4 q13", "update_ph_step_4 q16", "update_ph_step_4 q1","update_ph_step_4 q5", + "update_ph_step_4 q4", + // "i q0", + "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6"], - "flux_dance_5 q0": ["barrier q12, q1, q13, q7, q10, q4, q8, q3, q5", + "flux_dance_5 q0": ["barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", "sf_cz_ne q12", "sf_cz_sw q1", "sf_cz_sw q13", "sf_cz_ne q7", "sf_cz_ne q10", "sf_cz_sw q4", - "sf_park q8", "sf_park q3", "sf_park q5", - "barrier q12, q1, q13, q7, q10, q4, q8, q3, q5"], + "sf_park q8", "sf_park q3", "sf_park q5", + "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", + // "update_ph_step_5 q7", "update_ph_step_5 q14","update_ph_step_5 q12", "update_ph_step_5 q10", + // "update_ph_step_5 q6", "update_ph_step_5 q2","update_ph_step_5 q0", "update_ph_step_5 q15", + // "update_ph_step_5 q13", "update_ph_step_5 q16","update_ph_step_5 q1", "update_ph_step_5 q5", + // "update_ph_step_5 q4", + "i q0", + "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6"], - "flux_dance_6 q0": ["barrier q15, q12, q7, q2, q16, q10, q8, q3, q6, q14", + "flux_dance_6 q0": ["barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", "sf_cz_nw q15", "sf_cz_se q12", "sf_cz_se q7", "sf_cz_nw q2", "sf_cz_nw q16", "sf_cz_se q10", - "sf_park q8", "sf_park q3", "sf_park q6", 
"sf_park q14", - "barrier q15, q12, q7, q2, q16, q10, q8, q3, q6, q14"], + "sf_park q8", "sf_park q3", "sf_park q6", "sf_park q14", + "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", + // "update_ph_step_6 q7", "update_ph_step_6 q14","update_ph_step_6 q12", "update_ph_step_6 q10", + // "update_ph_step_6 q6", "update_ph_step_6 q2","update_ph_step_6 q0", "update_ph_step_6 q15", + // "update_ph_step_6 q13", "update_ph_step_6 q16","update_ph_step_6 q1", "update_ph_step_6 q5", + // "update_ph_step_6 q4", + "i q0", + // "barrier q3, q1, q13, q8, q11, q6, q5, q10, q7, q2, q15, q4, q0, q9, q12, q16, q14", + // "cw_01 q6", "cw_01 q2", "cw_01 q0", + // "cw_01 q15", "cw_01 q13", "cw_01 q16", + // "cw_01 q1", "cw_01 q5", "cw_01 q4", + // "i q0", + "barrier q3, q1, q13, q8, q11, q6, q5, q10, q7, q2, q15, q4, q0, q9, q12, q16, q14"], - "flux_dance_7 q0": ["barrier q15, q7, q10, q5, q16, q14, q8, q3, q4, q12", + "flux_dance_7 q0": ["barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", "sf_cz_se q15", "sf_cz_nw q7", "sf_cz_nw q10", "sf_cz_se q5", "sf_cz_se q16", "sf_cz_nw q14", - "sf_park q8", "sf_park q3", "sf_park q4", "sf_park q12", - "barrier q15, q7, q10, q5, q16, q14, q8, q3, q4, q12"], + "sf_park q8", "sf_park q3", "sf_park q4", "sf_park q12", + "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", + // "update_ph_step_7 q7", "update_ph_step_7 q14","update_ph_step_7 q12", "update_ph_step_7 q10", + // "update_ph_step_7 q6", "update_ph_step_7 q2","update_ph_step_7 q0", "update_ph_step_7 q15", + // "update_ph_step_7 q13", "update_ph_step_7 q16","update_ph_step_7 q1", "update_ph_step_7 q5", + // "update_ph_step_7 q4", + "i q0", + "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6"], - "flux_dance_8 q0": ["barrier q7, q6, q13, q10, q14, q0, q8, q3, q2", + "flux_dance_8 q0": ["barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, 
q11, q6", "sf_cz_sw q7", "sf_cz_ne q6", "sf_cz_ne q13", "sf_cz_sw q10", "sf_cz_sw q14", "sf_cz_ne q0", - "sf_park q8", "sf_park q3", "sf_park q2", - "barrier q7, q6, q13, q10, q14, q0, q8, q3, q2"], + "sf_park q8", "sf_park q3", "sf_park q2", + "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", + "update_ph_step_8 q7", "update_ph_step_8 q14","update_ph_step_8 q12", "update_ph_step_8 q10", + "update_ph_step_8 q11", "update_ph_step_8 q8", "update_ph_step_8 q3","update_ph_step_8 q9", + "update_ph_step_8 q6", "update_ph_step_8 q2","update_ph_step_8 q0", "update_ph_step_8 q15", + "update_ph_step_8 q13", "update_ph_step_8 q16","update_ph_step_8 q1", "update_ph_step_8 q5", + "update_ph_step_8 q4", + "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6"], + + // "flux_dance_5 q0": ["barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", + // "sf_cz_ne q12", "sf_cz_sw q1", "sf_cz_sw q13", "sf_cz_ne q7", "sf_cz_ne q10", "sf_cz_sw q4", + // "sf_park q5", + // "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", + // // "update_ph_step_5 q7", "update_ph_step_5 q14","update_ph_step_5 q12", "update_ph_step_5 q10", + // // "update_ph_step_5 q6", "update_ph_step_5 q2","update_ph_step_5 q0", "update_ph_step_5 q15", + // // "update_ph_step_5 q13", "update_ph_step_5 q16","update_ph_step_5 q1", "update_ph_step_5 q5", + // // "update_ph_step_5 q4", + // "i q0", + // "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6"], + + // "flux_dance_6 q0": ["barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", + // "sf_cz_nw q15", "sf_cz_se q12", "sf_cz_se q7", "sf_cz_nw q2", "sf_cz_nw q16", "sf_cz_se q10", + // "sf_park q6", "sf_park q14", "sf_park q0", + // "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", + // // "update_ph_step_6 q7", "update_ph_step_6 q14","update_ph_step_6 
q12", "update_ph_step_6 q10", + // // "update_ph_step_6 q6", "update_ph_step_6 q2","update_ph_step_6 q0", "update_ph_step_6 q15", + // // "update_ph_step_6 q13", "update_ph_step_6 q16","update_ph_step_6 q1", "update_ph_step_6 q5", + // // "update_ph_step_6 q4", + // "i q0", + // // "barrier q3, q1, q13, q8, q11, q6, q5, q10, q7, q2, q15, q4, q0, q9, q12, q16, q14", + // // "cw_01 q6", "cw_01 q2", "cw_01 q0", + // // "cw_01 q15", "cw_01 q13", "cw_01 q16", + // // "cw_01 q1", "cw_01 q5", "cw_01 q4", + // // "i q0", + // "barrier q3, q1, q13, q8, q11, q6, q5, q10, q7, q2, q15, q4, q0, q9, q12, q16, q14"], + // "flux_dance_7 q0": ["barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", + // "sf_cz_se q15", "sf_cz_nw q7", "sf_cz_nw q10", "sf_cz_se q5", "sf_cz_se q16", "sf_cz_nw q14", + // "sf_park q4", "sf_park q12", "sf_park q1", + // "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", + // // "update_ph_step_7 q7", "update_ph_step_7 q14","update_ph_step_7 q12", "update_ph_step_7 q10", + // // "update_ph_step_7 q6", "update_ph_step_7 q2","update_ph_step_7 q0", "update_ph_step_7 q15", + // // "update_ph_step_7 q13", "update_ph_step_7 q16","update_ph_step_7 q1", "update_ph_step_7 q5", + // // "update_ph_step_7 q4", + // "i q0", + // "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6"], + + // "flux_dance_8 q0": ["barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", + // "sf_cz_sw q7", "sf_cz_ne q6", "sf_cz_ne q13", "sf_cz_sw q10", "sf_cz_sw q14", "sf_cz_ne q0", + // "sf_park q2", + // "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", + // "update_ph_step_8 q7", "update_ph_step_8 q14","update_ph_step_8 q12", "update_ph_step_8 q10", + // // "update_ph_step_8 q11", "update_ph_step_8 q8", "update_ph_step_8 q3","update_ph_step_8 q9", + // "update_ph_step_8 q6", "update_ph_step_8 q2","update_ph_step_8 q0", 
"update_ph_step_8 q15", + // "update_ph_step_8 q13", "update_ph_step_8 q16","update_ph_step_8 q1", "update_ph_step_8 q5", + // "update_ph_step_8 q4", + // "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6"], // // // Qubits are ordered in sf_cz target, control. "flux_dance_refocus_1 q0": ["barrier q3, q5, q16, q8, q11, q2, q1, q10, q14, q6, q0, q7, q15, q13, q12, q4, q9", "sf_cz_ne q3", "sf_cz_sw q5","sf_cz_sw q16", "sf_cz_ne q8", "sf_cz_ne q11", "sf_cz_sw q2", "sf_park q1", "sf_park q10", "sf_park q14","sf_park q6", - "cw_01 q0", "cw_01 q15", "cw_01 q13", "cw_01 q4", "cw_01 q9", + "cw_01 q0", "cw_01 q15", "cw_01 q13", "cw_01 q4", "barrier q3, q5, q16, q8, q11, q2, q1, q10, q14, q6, q0, q7, q15, q13, q12, q4, q9", - "update_ph_park_1 q11", "update_ph_park_1 q8", "update_ph_park_1 q3", - "cw_27 q0", "cw_27 q15", "cw_27 q13", "cw_27 q4", "cw_27 q9", + "cw_27 q0", "cw_27 q15", "cw_27 q13", "cw_27 q4", + "update_ph_step_4 q1","update_ph_step_4 q16","update_ph_step_4 q6", "barrier q3, q5, q16, q8, q11, q2, q1, q10, q14, q6, q0, q7, q15, q13, q12, q4, q9"], "flux_dance_refocus_2 q0": ["barrier q3, q1, q13, q8, q11, q6, q5, q10, q7, q2, q15, q4, q0, q9, q12, q16, q14", "sf_cz_nw q3", "sf_cz_se q1","sf_cz_se q13", "sf_cz_nw q8", "sf_cz_nw q11", "sf_cz_se q6", "sf_park q5", "sf_park q10", "sf_park q7","sf_park q2", - "cw_01 q15", "cw_01 q4", "cw_01 q0", "cw_01 q9", "cw_01 q16", + "cw_01 q15", "cw_01 q0", "cw_01 q9", "cw_01 q4", "barrier q3, q1, q13, q8, q11, q6, q5, q10, q7, q2, q15, q4, q0, q9, q12, q16, q14", - "cw_27 q15", "cw_27 q4", "cw_27 q0", "cw_27 q9", "cw_27 q16", + "cw_27 q15", "cw_27 q0", "cw_27 q9", "cw_27 q4", "barrier q3, q1, q13, q8, q11, q6, q5, q10, q7, q2, q15, q4, q0, q9, q12, q16, q14"], "flux_dance_refocus_3 q0": ["barrier q9, q4, q13, q3, q8, q0, q5, q10, q7, q2, q14, q16, q1, q12, q15, q6, q11", "sf_cz_se q9", "sf_cz_nw q4","sf_cz_nw q13", "sf_cz_se q3", "sf_cz_se q8", "sf_cz_nw q0", "sf_park q5", "sf_park q10", 
"sf_park q7","sf_park q2", - "cw_01 q16", "cw_01 q1", "cw_01 q15", "cw_01 q6", "cw_01 q11", - "barrier q9, q4, q13, q3, q8, q0, q5, q10, q7, q2, q14, q16, q1, q12, q15, q6, q11", - "update_ph_park_1 q9", - "cw_27 q16", "cw_27 q1", "cw_27 q15", "cw_27 q6", "cw_27 q11", + "cw_01 q16", "cw_01 q1", "cw_01 q15", "cw_01 q6", + "barrier q9, q4, q13, q3, q8, q0, q5, q10, q7, q2, q14, q16, q1, q12, q15, q6, q11", + "cw_27 q16", "cw_27 q1", "cw_27 q15", "cw_27 q6", + "update_ph_step_4 q13", "barrier q9, q4, q13, q3, q8, q0, q5, q10, q7, q2, q14, q16, q1, q12, q15, q6, q11"], "flux_dance_refocus_4 q0": ["barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", "sf_cz_sw q9", "sf_cz_ne q5", "sf_cz_ne q15", "sf_cz_sw q3", "sf_cz_sw q8", "sf_cz_ne q2", "sf_park q4", "sf_park q12", "sf_park q7","sf_park q0", - "cw_01 q1", "cw_01 q16", "cw_01 q13", "cw_01 q11", "cw_01 q6", + "cw_01 q1", "cw_01 q16", "cw_01 q13", "cw_01 q6", "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", - "cw_27 q1", "cw_27 q16", "cw_27 q13", "cw_27 q11", "cw_27 q6", + "cw_27 q1", "cw_27 q16", "cw_27 q13", "cw_27 q6", + "update_ph_step_4 q11", "update_ph_step_4 q8", "update_ph_step_4 q3","update_ph_step_4 q9", + "update_ph_step_4 q2", "update_ph_step_4 q0","update_ph_step_4 q15", + "update_ph_step_4 q5", + "update_ph_step_4 q4", "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6"], "flux_dance_refocus_5 q0": ["barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", @@ -652,8 +856,8 @@ "sf_park q8", "sf_park q3", "sf_park q5", "cw_01 q15", "cw_01 q6", "cw_01 q0", "cw_01 q2", "cw_01 q16", "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", - "update_ph_park_1 q12", "update_ph_park_1 q7", "update_ph_park_1 q10", "cw_27 q15", "cw_27 q6", "cw_27 q0", "cw_27 q2", "cw_27 q16", + "update_ph_step_8 q1","update_ph_step_8 q5","update_ph_step_8 q4", "barrier q9, q5, q15, 
q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6"], "flux_dance_refocus_6 q0": ["barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", @@ -671,8 +875,8 @@ "sf_park q8", "sf_park q3", "sf_park q4", "sf_park q12", "cw_01 q1", "cw_01 q13", "cw_01 q6", "cw_01 q2", "cw_01 q0", "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", - "update_ph_park_1 q14", "cw_27 q1", "cw_27 q13", "cw_27 q6", "cw_27 q2", "cw_27 q0", + "update_ph_step_8 q15","update_ph_step_8 q16", "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6"], "flux_dance_refocus_8 q0": ["barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", @@ -682,72 +886,53 @@ "cw_01 q1", "cw_01 q5", "cw_01 q4", "cw_01 q15", "cw_01 q16", "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6", "cw_27 q1", "cw_27 q5", "cw_27 q4", "cw_27 q15", "cw_27 q16", + "update_ph_step_8 q7", "update_ph_step_8 q14","update_ph_step_8 q12", "update_ph_step_8 q10", + "update_ph_step_8 q6", "update_ph_step_8 q2","update_ph_step_8 q0", + "update_ph_step_8 q13", "barrier q9, q5, q15, q3, q8, q2, q4, q12, q7, q0, q1, q10, q16, q13, q14, q11, q6"], - - // fluxing steps for parity checks in a distance_7 repetition code - // "repetition_code_1 q0": ["barrier q9, q5, q8, q2, q4, q7, q0, q6", - // "sf_cz_sw q9", "sf_cz_ne q5", "sf_cz_sw q7", "sf_cz_ne q6", "sf_cz_se q8", "sf_cz_nw q0", - // "sf_park q2", "sf_park q4", - // "barrier q9, q5, q8, q2, q4, q7, q0, q6"], - - // "repetition_code_2 q0": ["barrier q9, q5, q3, q8, q2, q4, q7, q0, q13, q10", - // "sf_cz_se q9", "sf_cz_nw q4", "sf_cz_sw q13", "sf_cz_ne q7", "sf_cz_sw q8", "sf_cz_ne q2", - // "sf_park q5", "sf_park q3", "sf_park q10", "sf_park q0", - // "barrier q9, q5, q3, q8, q2, q4, q7, q0, q13, q10"], - - // "repetition_code_3 q0": ["barrier q3, q8, q2, q7, q16, q13, q10, q11, q6, q14", - // "sf_cz_nw q13", "sf_cz_se q3", "sf_cz_ne 
q11", "sf_cz_sw q2", "sf_cz_se q16", "sf_cz_nw q14", - // "sf_park q10", "sf_park q7", "sf_park q8", "sf_park q6", - // "barrier q3, q8, q2, q7, q16, q13, q10, q11, q6, q14"], - - // "repetition_code_4 q0": ["barrier q5, q3, q2, q1, q14, q11, q6, q0", - // "sf_cz_ne q3", "sf_cz_sw q5", "sf_cz_nw q11", "sf_cz_se q6", "sf_cz_sw q14", "sf_cz_ne q0", - // "sf_park q1", "sf_park q2", - // "barrier q5, q3, q2, q1, q14, q11, q6, q0"], - - // fluxing steps for parity checks in a distance_7 repetition code with phase updates - "repetition_code_1 q0": ["barrier q9, q5, q8, q2, q4, q7, q0, q6, q13, q16", - "sf_cz_sw q9", "sf_cz_ne q5", "sf_cz_sw q7", "sf_cz_ne q6", "sf_cz_se q8", "sf_cz_nw q0", - "sf_park q2", "sf_park q4", - "cw_01 q13", "cw_01 q16", - "barrier q9, q5, q8, q2, q4, q7, q0, q6, q13, q16", - // "update_ph_sw q9", "update_ph_ne q5", "update_ph_sw q7", "update_ph_ne q6", "update_ph_se q8", "update_ph_nw q0", - // "update_ph_park_1 q2", "update_ph_park_1 q4", - "cw_27 q13", "cw_27 q16", - "barrier q9, q5, q8, q2, q4, q7, q0, q6, q13, q16"], - - "repetition_code_2 q0": ["barrier q9, q5, q3, q8, q2, q4, q7, q0, q13, q10, q6, q16", - "sf_cz_se q9", "sf_cz_nw q4", "sf_cz_sw q13", "sf_cz_ne q7", "sf_cz_sw q8", "sf_cz_ne q2", - "sf_park q5", "sf_park q3", "sf_park q10", "sf_park q0", - "cw_01 q6", "cw_01 q16", - "barrier q9, q5, q3, q8, q2, q4, q7, q0, q13, q10, q6, q16", - // "update_ph_se q9", "update_ph_nw q4", "update_ph_sw q13", "update_ph_ne q7", "update_ph_sw q8", "update_ph_ne q2", - // "update_ph_park_2 q5", "update_ph_park_2 q3", "update_ph_park_2 q10", "update_ph_park_2 q0", - "update_ph_park_1 q9", "update_ph_park_1 q7", "update_ph_park_1 q8", - "cw_27 q6", "cw_27 q16", - "barrier q9, q5, q3, q8, q2, q4, q7, q0, q13, q10, q6, q16"], - - "repetition_code_3 q0": ["barrier q3, q8, q2, q7, q16, q13, q10, q11, q6, q14, q0, q4, q5", - "sf_cz_nw q13", "sf_cz_se q3", "sf_cz_ne q11", "sf_cz_sw q2", "sf_cz_se q16", "sf_cz_nw q14", - "sf_park q10", "sf_park q7", 
"sf_park q8", "sf_park q6", - "cw_01 q5", "cw_01 q4", "cw_01 q0", - "barrier q3, q8, q2, q7, q16, q13, q10, q11, q6, q14, q0, q4, q5", - // "update_ph_nw q13", "update_ph_se q3", "update_ph_ne q11", "update_ph_sw q2", "update_ph_se q16", "update_ph_nw q14", - // "update_ph_park_3 q10", "update_ph_park_3 q7", "update_ph_park_3 q8", "update_ph_park_3 q6", - "cw_27 q5", "cw_27 q4", "cw_27 q0", - "barrier q3, q8, q2, q7, q16, q13, q10, q11, q6, q14, q0, q4, q5"], - - "repetition_code_4 q0": ["barrier q5, q3, q2, q1, q14, q11, q6, q0, q4, q13, q16", - "sf_cz_ne q3", "sf_cz_sw q5", "sf_cz_nw q11", "sf_cz_se q6", "sf_cz_sw q14", "sf_cz_ne q0", - "sf_park q1", "sf_park q2", - "cw_01 q4", "cw_01 q13", "cw_01 q16", - "barrier q5, q3, q2, q1, q14, q11, q6, q0, q4, q13, q16", - // "update_ph_ne q3", "update_ph_sw q5", "update_ph_nw q11", "update_ph_se q6", "update_ph_sw q14", "update_ph_ne q0", - // "update_ph_park_4 q1", "update_ph_park_4 q2", - "update_ph_park_1 q3", "update_ph_park_1 q11", "update_ph_park_1 q14", - "cw_27 q4", "cw_27 q13", "cw_27 q16", - "barrier q5, q3, q2, q1, q14, q11, q6, q0, q4, q13, q16"], + // [AUTOMATIC GENERATED GATE SEQUENCE - QCoCircuits] + // Gate sequence for distance-3 repetition code. 
Using Z2-Z4 only + "repetition_code_1 q0": [ + "barrier q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12, q13, q14, q15, q16", + "sf_cz_nw q14", "sf_cz_se q16", // Z2-D6 2Q gate + "sf_park q10", // Z4 park + "sf_park q8", // X2 park + "barrier q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12, q13, q14, q15, q16", + "update_ph_nw q14", + "update_ph_se q16", + "barrier q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12, q13, q14, q15, q16" + ], + "repetition_code_2 q0": [ + "barrier q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12, q13, q14, q15, q16", + "sf_cz_sw q14", "sf_cz_ne q0", // Z2-D3 2Q gate + "barrier q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12, q13, q14, q15, q16", + "update_ph_sw q14", + "update_ph_ne q0", + "barrier q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12, q13, q14, q15, q16" + ], + "repetition_code_3 q0": [ + "barrier q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12, q13, q14, q15, q16", + "sf_cz_sw q10", "sf_cz_ne q13", // Z4-D5 2Q gate + "sf_park q3", // X3 park + "sf_park q7", // Z1 park + "sf_park q8", // X2 park + "barrier q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12, q13, q14, q15, q16", + "update_ph_sw q10", + "update_ph_ne q13", + "barrier q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12, q13, q14, q15, q16" + ], + "repetition_code_4 q0": [ + "barrier q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12, q13, q14, q15, q16", + "sf_cz_se q10", "sf_cz_nw q16", // Z4-D6 2Q gate + "sf_park q14", // Z2 park + "sf_park q8", // X2 park + "sf_park q0", // D3 park + "barrier q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12, q13, q14, q15, q16", + "update_ph_se q10", + "update_ph_nw q16", + "barrier q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12, q13, q14, q15, q16" + ], // CC additions "cnot_park1 %0 %1 %2": ["ry90 %1", "cz %0 %1", "park_cz %2", "ry90 %1"], @@ -782,7 +967,10 @@ "rphim45 %0": ["cw_15 %0"], "rphi45 %0": ["cw_16 %0"], "rphi135m90 %0": ["cw_17 %0"], - "rphi13590 %0": 
["cw_18 %0"] + "rphi13590 %0": ["cw_18 %0"], + "PRNG %0": ["cw_127 %0"], // Reserved for PRNG operation + "rx175 %0": ["cw_31 %0"], + "rx170 %0": ["cw_32 %0"] }, @@ -822,6 +1010,16 @@ "static_codeword_override": [1] } }, + "rxm180": { + "duration": @MW_DURATION@, + "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], + "type": "mw", + "cc_light_instr": "x", + "cc": { + "ref_signal": "single-qubit-mw", // NB: reference, instead of defining "signal" here + "static_codeword_override": [27] + } + }, "ry180": { "duration": @MW_DURATION@, "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], @@ -832,6 +1030,16 @@ "static_codeword_override": [2] } }, + "rym180": { + "duration": @MW_DURATION@, + "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], + "type": "mw", + "cc_light_instr": "x", + "cc": { + "ref_signal": "single-qubit-mw", // NB: reference, instead of defining "signal" here + "static_codeword_override": [28] + } + }, "rx90": { "duration": @MW_DURATION@, "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], @@ -1243,7 +1451,27 @@ "cc_light_instr": "rx12", "cc": { "ref_signal": "single-qubit-mw", - "static_codeword_override": [31] + "static_codeword_override": [9] + } + }, + "rx23": { + "duration": @MW_DURATION@, + "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], + "type": "mw", + "cc_light_instr": "rx23", + "cc": { + "ref_signal": "single-qubit-mw", + "static_codeword_override": [30] + } + }, + "cw_lru": { + "duration": @MW_DURATION@, + "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], + "type": "mw", + "cc_light_instr": "cw_01", + "cc": { + "ref_signal": "single-qubit-mw", + "static_codeword_override": [1] } }, // cw_00 .. 
cw_31 @@ -1567,6 +1795,176 @@ "static_codeword_override": [31] } }, + "cw_32": { + "duration": @MW_DURATION@, + "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], + "type": "mw", + "cc_light_instr": "cw_32", + "cc": { + "ref_signal": "single-qubit-mw", + "static_codeword_override": [32] + } + }, + "cw_33": { + "duration": @MW_DURATION@, + "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], + "type": "mw", + "cc_light_instr": "cw_33", + "cc": { + "ref_signal": "single-qubit-mw", + "static_codeword_override": [33] + } + }, + "cw_34": { + "duration": @MW_DURATION@, + "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], + "type": "mw", + "cc_light_instr": "cw_34", + "cc": { + "ref_signal": "single-qubit-mw", + "static_codeword_override": [34] + } + }, + "cw_35": { + "duration": @MW_DURATION@, + "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], + "type": "mw", + "cc_light_instr": "cw_35", + "cc": { + "ref_signal": "single-qubit-mw", + "static_codeword_override": [35] + } + }, + "cw_36": { + "duration": @MW_DURATION@, + "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], + "type": "mw", + "cc_light_instr": "cw_36", + "cc": { + "ref_signal": "single-qubit-mw", + "static_codeword_override": [36] + } + }, + "cw_37": { + "duration": @MW_DURATION@, + "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], + "type": "mw", + "cc_light_instr": "cw_37", + "cc": { + "ref_signal": "single-qubit-mw", + "static_codeword_override": [37] + } + }, + "cw_38": { + "duration": @MW_DURATION@, + "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], + "type": "mw", + "cc_light_instr": "cw_38", + "cc": { + "ref_signal": "single-qubit-mw", + "static_codeword_override": [38] + } + }, + "cw_39": { + "duration": @MW_DURATION@, + "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], + "type": "mw", + "cc_light_instr": "cw_39", + "cc": { + "ref_signal": "single-qubit-mw", + "static_codeword_override": [39] + } + }, + "cw_40": { + "duration": @MW_DURATION@, + 
"matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], + "type": "mw", + "cc_light_instr": "cw_40", + "cc": { + "ref_signal": "single-qubit-mw", + "static_codeword_override": [40] + } + }, + "cw_41": { + "duration": @MW_DURATION@, + "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], + "type": "mw", + "cc_light_instr": "cw_41", + "cc": { + "ref_signal": "single-qubit-mw", + "static_codeword_override": [41] + } + }, + "cw_42": { + "duration": @MW_DURATION@, + "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], + "type": "mw", + "cc_light_instr": "cw_42", + "cc": { + "ref_signal": "single-qubit-mw", + "static_codeword_override": [42] + } + }, + "cw_43": { + "duration": @MW_DURATION@, + "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], + "type": "mw", + "cc_light_instr": "cw_43", + "cc": { + "ref_signal": "single-qubit-mw", + "static_codeword_override": [43] + } + }, + "cw_44": { + "duration": @MW_DURATION@, + "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], + "type": "mw", + "cc_light_instr": "cw_44", + "cc": { + "ref_signal": "single-qubit-mw", + "static_codeword_override": [44] + } + }, + "cw_45": { + "duration": @MW_DURATION@, + "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], + "type": "mw", + "cc_light_instr": "cw_45", + "cc": { + "ref_signal": "single-qubit-mw", + "static_codeword_override": [45] + } + }, + "cw_46": { + "duration": @MW_DURATION@, + "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], + "type": "mw", + "cc_light_instr": "cw_46", + "cc": { + "ref_signal": "single-qubit-mw", + "static_codeword_override": [46] + } + }, + "cw_47": { + "duration": @MW_DURATION@, + "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], + "type": "mw", + "cc_light_instr": "cw_47", + "cc": { + "ref_signal": "single-qubit-mw", + "static_codeword_override": [47] + } + }, + "cw_127": { + "duration": @MW_DURATION@, + "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], + "type": "mw", + "cc_light_instr": "cw_127", + 
"cc": { + "ref_signal": "single-qubit-mw", + "static_codeword_override": [127] + } + }, // fl_cw_00 .. fl_cw_07 "fl_cw_00": { "duration": @FLUX_DURATION@, @@ -1819,16 +2217,6 @@ // } // }, - "cw_27": { - "duration": @MW_DURATION@, - "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], - "type": "mw", - "cc_light_instr": "cw_27", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [27] - } - }, // "cw_27 q1": { // "duration": @MW_DURATION@, // "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], @@ -2093,6 +2481,39 @@ "static_codeword_override": [5] } }, + + "sf_custom_wf": { + "duration": @FLUX_DURATION@, + "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], + "type": "flux", + "cc_light_instr": "sf_custom_wf", + "cc": { + "ref_signal": "single-qubit-flux", + "static_codeword_override": [7] + } + }, + + "sf_cz_aux": { + "duration": @FLUX_DURATION@, + "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], + "type": "flux", + "cc_light_instr": "sf_park", + "cc": { + "ref_signal": "single-qubit-flux", + "static_codeword_override": [7] + } + }, + + "sf_lru": { + "duration": @FLUX_DURATION@, + "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], + "type": "flux", + "cc_light_instr": "sf_cz_se", + "cc": { + "ref_signal": "single-qubit-flux", + "static_codeword_override": [1] + } + }, // "sf_park q0": { // "duration": @FLUX_DURATION@, // "matrix": [ [0.0,1.0], [1.0,0.0], [1.0,0.0], [0.0,0.0] ], @@ -3850,12 +4271,25 @@ // }, // END OF AUTOMATICALLY GENERATED SECTION + "update_ph_LRU": { + "duration": @MW_DURATION@, + "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + "type": "mw", + "cc_light_instr": "update_ph_LRU", + "cc": { + "ref_signal": "single-qubit-mw", + "static_codeword_override": [ + 51 + ] + } + }, + // BEGIN OF AUTOMATICALLY GENERATED SECTION - "update_ph_park_1": { + "update_ph_step_1": { "duration": @MW_DURATION@, "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], "type": "mw", - 
"cc_light_instr": "update_ph_park_1", + "cc_light_instr": "update_ph_step_1", "cc": { "ref_signal": "single-qubit-mw", "static_codeword_override": [ @@ -3863,7 +4297,7 @@ ] } }, - // "update_ph_park_1 q1": { + // "update_ph_step_1 q1": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -3884,7 +4318,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_1", + // "cc_light_instr": "update_ph_step_1", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -3892,7 +4326,7 @@ // ] // } // }, - // "update_ph_park_1 q2": { + // "update_ph_step_1 q2": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -3913,7 +4347,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_1", + // "cc_light_instr": "update_ph_step_1", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -3921,7 +4355,7 @@ // ] // } // }, - // "update_ph_park_1 q3": { + // "update_ph_step_1 q3": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -3942,7 +4376,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_1", + // "cc_light_instr": "update_ph_step_1", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -3950,7 +4384,7 @@ // ] // } // }, - // "update_ph_park_1 q4": { + // "update_ph_step_1 q4": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -3971,7 +4405,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_1", + // "cc_light_instr": "update_ph_step_1", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -3979,7 +4413,7 @@ // ] // } // }, - // "update_ph_park_1 q5": { + // "update_ph_step_1 q5": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4000,7 +4434,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_1", + // "cc_light_instr": "update_ph_step_1", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4008,7 +4442,7 @@ // ] // } // }, - 
// "update_ph_park_1 q6": { + // "update_ph_step_1 q6": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4029,7 +4463,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_1", + // "cc_light_instr": "update_ph_step_1", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4037,7 +4471,7 @@ // ] // } // }, - // "update_ph_park_1 q7": { + // "update_ph_step_1 q7": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4058,7 +4492,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_1", + // "cc_light_instr": "update_ph_step_1", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4066,7 +4500,7 @@ // ] // } // }, - // "update_ph_park_1 q8": { + // "update_ph_step_1 q8": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4087,7 +4521,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_1", + // "cc_light_instr": "update_ph_step_1", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4095,7 +4529,7 @@ // ] // } // }, - // "update_ph_park_1 q9": { + // "update_ph_step_1 q9": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4116,7 +4550,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_1", + // "cc_light_instr": "update_ph_step_1", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4124,7 +4558,7 @@ // ] // } // }, - // "update_ph_park_1 q10": { + // "update_ph_step_1 q10": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4145,7 +4579,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_1", + // "cc_light_instr": "update_ph_step_1", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4153,7 +4587,7 @@ // ] // } // }, - // "update_ph_park_1 q11": { + // "update_ph_step_1 q11": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4174,7 +4608,7 @@ // ] // ], // "type": "mw", - // 
"cc_light_instr": "update_ph_park_1", + // "cc_light_instr": "update_ph_step_1", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4182,7 +4616,7 @@ // ] // } // }, - // "update_ph_park_1 q12": { + // "update_ph_step_1 q12": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4203,7 +4637,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_1", + // "cc_light_instr": "update_ph_step_1", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4211,7 +4645,7 @@ // ] // } // }, - // "update_ph_park_1 q13": { + // "update_ph_step_1 q13": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4232,7 +4666,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_1", + // "cc_light_instr": "update_ph_step_1", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4240,7 +4674,7 @@ // ] // } // }, - // "update_ph_park_1 q14": { + // "update_ph_step_1 q14": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4261,7 +4695,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_1", + // "cc_light_instr": "update_ph_step_1", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4269,7 +4703,7 @@ // ] // } // }, - // "update_ph_park_1 q15": { + // "update_ph_step_1 q15": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4290,7 +4724,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_1", + // "cc_light_instr": "update_ph_step_1", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4298,7 +4732,7 @@ // ] // } // }, - // "update_ph_park_1 q16": { + // "update_ph_step_1 q16": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4319,7 +4753,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_1", + // "cc_light_instr": "update_ph_step_1", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ 
-4327,11 +4761,11 @@ // ] // } // }, - "update_ph_park_2": { + "update_ph_step_2": { "duration": @MW_DURATION@, "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], "type": "mw", - "cc_light_instr": "update_ph_park_2", + "cc_light_instr": "update_ph_step_2", "cc": { "ref_signal": "single-qubit-mw", "static_codeword_override": [ @@ -4339,7 +4773,7 @@ ] } }, - // "update_ph_park_2 q1": { + // "update_ph_step_2 q1": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4360,7 +4794,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_2", + // "cc_light_instr": "update_ph_step_2", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4368,7 +4802,7 @@ // ] // } // }, - // "update_ph_park_2 q2": { + // "update_ph_step_2 q2": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4389,7 +4823,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_2", + // "cc_light_instr": "update_ph_step_2", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4397,7 +4831,7 @@ // ] // } // }, - // "update_ph_park_2 q3": { + // "update_ph_step_2 q3": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4418,7 +4852,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_2", + // "cc_light_instr": "update_ph_step_2", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4426,7 +4860,7 @@ // ] // } // }, - // "update_ph_park_2 q4": { + // "update_ph_step_2 q4": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4447,7 +4881,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_2", + // "cc_light_instr": "update_ph_step_2", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4455,7 +4889,7 @@ // ] // } // }, - // "update_ph_park_2 q5": { + // "update_ph_step_2 q5": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4476,7 +4910,7 @@ // ] // ], // "type": "mw", - // 
"cc_light_instr": "update_ph_park_2", + // "cc_light_instr": "update_ph_step_2", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4484,7 +4918,7 @@ // ] // } // }, - // "update_ph_park_2 q6": { + // "update_ph_step_2 q6": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4505,7 +4939,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_2", + // "cc_light_instr": "update_ph_step_2", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4513,7 +4947,7 @@ // ] // } // }, - // "update_ph_park_2 q7": { + // "update_ph_step_2 q7": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4534,7 +4968,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_2", + // "cc_light_instr": "update_ph_step_2", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4542,7 +4976,7 @@ // ] // } // }, - // "update_ph_park_2 q8": { + // "update_ph_step_2 q8": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4563,7 +4997,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_2", + // "cc_light_instr": "update_ph_step_2", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4571,7 +5005,7 @@ // ] // } // }, - // "update_ph_park_2 q9": { + // "update_ph_step_2 q9": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4592,7 +5026,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_2", + // "cc_light_instr": "update_ph_step_2", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4600,7 +5034,7 @@ // ] // } // }, - // "update_ph_park_2 q10": { + // "update_ph_step_2 q10": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4621,7 +5055,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_2", + // "cc_light_instr": "update_ph_step_2", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4629,7 
+5063,7 @@ // ] // } // }, - // "update_ph_park_2 q11": { + // "update_ph_step_2 q11": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4650,7 +5084,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_2", + // "cc_light_instr": "update_ph_step_2", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4658,7 +5092,7 @@ // ] // } // }, - // "update_ph_park_2 q12": { + // "update_ph_step_2 q12": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4679,7 +5113,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_2", + // "cc_light_instr": "update_ph_step_2", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4687,7 +5121,7 @@ // ] // } // }, - // "update_ph_park_2 q13": { + // "update_ph_step_2 q13": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4708,7 +5142,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_2", + // "cc_light_instr": "update_ph_step_2", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4716,7 +5150,7 @@ // ] // } // }, - // "update_ph_park_2 q14": { + // "update_ph_step_2 q14": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4737,7 +5171,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_2", + // "cc_light_instr": "update_ph_step_2", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4745,7 +5179,7 @@ // ] // } // }, - // "update_ph_park_2 q15": { + // "update_ph_step_2 q15": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4766,7 +5200,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_2", + // "cc_light_instr": "update_ph_step_2", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4774,7 +5208,7 @@ // ] // } // }, - // "update_ph_park_2 q16": { + // "update_ph_step_2 q16": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4795,7 +5229,7 @@ // ] // 
], // "type": "mw", - // "cc_light_instr": "update_ph_park_2", + // "cc_light_instr": "update_ph_step_2", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4803,11 +5237,11 @@ // ] // } // }, - "update_ph_park_3": { + "update_ph_step_3": { "duration": @MW_DURATION@, "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], "type": "mw", - "cc_light_instr": "update_ph_park_3", + "cc_light_instr": "update_ph_step_3", "cc": { "ref_signal": "single-qubit-mw", "static_codeword_override": [ @@ -4815,7 +5249,7 @@ ] } }, - // "update_ph_park_3 q1": { + // "update_ph_step_3 q1": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4836,7 +5270,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_3", + // "cc_light_instr": "update_ph_step_3", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4844,7 +5278,7 @@ // ] // } // }, - // "update_ph_park_3 q2": { + // "update_ph_step_3 q2": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4865,7 +5299,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_3", + // "cc_light_instr": "update_ph_step_3", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4873,7 +5307,7 @@ // ] // } // }, - // "update_ph_park_3 q3": { + // "update_ph_step_3 q3": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4894,7 +5328,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_3", + // "cc_light_instr": "update_ph_step_3", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4902,7 +5336,7 @@ // ] // } // }, - // "update_ph_park_3 q4": { + // "update_ph_step_3 q4": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4923,7 +5357,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_3", + // "cc_light_instr": "update_ph_step_3", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4931,7 +5365,7 @@ // 
] // } // }, - // "update_ph_park_3 q5": { + // "update_ph_step_3 q5": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4952,7 +5386,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_3", + // "cc_light_instr": "update_ph_step_3", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4960,7 +5394,7 @@ // ] // } // }, - // "update_ph_park_3 q6": { + // "update_ph_step_3 q6": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -4981,7 +5415,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_3", + // "cc_light_instr": "update_ph_step_3", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -4989,7 +5423,7 @@ // ] // } // }, - // "update_ph_park_3 q7": { + // "update_ph_step_3 q7": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5010,7 +5444,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_3", + // "cc_light_instr": "update_ph_step_3", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5018,7 +5452,7 @@ // ] // } // }, - // "update_ph_park_3 q8": { + // "update_ph_step_3 q8": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5039,7 +5473,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_3", + // "cc_light_instr": "update_ph_step_3", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5047,7 +5481,7 @@ // ] // } // }, - // "update_ph_park_3 q9": { + // "update_ph_step_3 q9": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5068,7 +5502,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_3", + // "cc_light_instr": "update_ph_step_3", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5076,7 +5510,7 @@ // ] // } // }, - // "update_ph_park_3 q10": { + // "update_ph_step_3 q10": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5097,7 +5531,7 @@ // ] // ], // "type": "mw", - // 
"cc_light_instr": "update_ph_park_3", + // "cc_light_instr": "update_ph_step_3", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5105,7 +5539,7 @@ // ] // } // }, - // "update_ph_park_3 q11": { + // "update_ph_step_3 q11": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5126,7 +5560,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_3", + // "cc_light_instr": "update_ph_step_3", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5134,7 +5568,7 @@ // ] // } // }, - // "update_ph_park_3 q12": { + // "update_ph_step_3 q12": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5155,7 +5589,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_3", + // "cc_light_instr": "update_ph_step_3", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5163,7 +5597,7 @@ // ] // } // }, - // "update_ph_park_3 q13": { + // "update_ph_step_3 q13": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5184,7 +5618,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_3", + // "cc_light_instr": "update_ph_step_3", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5192,7 +5626,7 @@ // ] // } // }, - // "update_ph_park_3 q14": { + // "update_ph_step_3 q14": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5213,7 +5647,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_3", + // "cc_light_instr": "update_ph_step_3", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5221,7 +5655,7 @@ // ] // } // }, - // "update_ph_park_3 q15": { + // "update_ph_step_3 q15": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5242,7 +5676,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_3", + // "cc_light_instr": "update_ph_step_3", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ 
-5250,7 +5684,7 @@ // ] // } // }, - // "update_ph_park_3 q16": { + // "update_ph_step_3 q16": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5271,7 +5705,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_3", + // "cc_light_instr": "update_ph_step_3", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5279,11 +5713,11 @@ // ] // } // }, - "update_ph_park_4": { + "update_ph_step_4": { "duration": @MW_DURATION@, "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], "type": "mw", - "cc_light_instr": "update_ph_park_4", + "cc_light_instr": "update_ph_step_4", "cc": { "ref_signal": "single-qubit-mw", "static_codeword_override": [ @@ -5291,7 +5725,7 @@ ] } }, - // "update_ph_park_4 q1": { + // "update_ph_step_4 q1": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5312,7 +5746,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_4", + // "cc_light_instr": "update_ph_step_4", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5320,7 +5754,7 @@ // ] // } // }, - // "update_ph_park_4 q2": { + // "update_ph_step_4 q2": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5341,7 +5775,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_4", + // "cc_light_instr": "update_ph_step_4", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5349,7 +5783,7 @@ // ] // } // }, - // "update_ph_park_4 q3": { + // "update_ph_step_4 q3": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5370,7 +5804,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_4", + // "cc_light_instr": "update_ph_step_4", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5378,7 +5812,7 @@ // ] // } // }, - // "update_ph_park_4 q4": { + // "update_ph_step_4 q4": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5399,7 +5833,7 @@ // ] // ], // "type": "mw", - // 
"cc_light_instr": "update_ph_park_4", + // "cc_light_instr": "update_ph_step_4", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5407,7 +5841,7 @@ // ] // } // }, - // "update_ph_park_4 q5": { + // "update_ph_step_4 q5": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5428,7 +5862,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_4", + // "cc_light_instr": "update_ph_step_4", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5436,7 +5870,7 @@ // ] // } // }, - // "update_ph_park_4 q6": { + // "update_ph_step_4 q6": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5457,7 +5891,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_4", + // "cc_light_instr": "update_ph_step_4", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5465,7 +5899,7 @@ // ] // } // }, - // "update_ph_park_4 q7": { + // "update_ph_step_4 q7": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5486,7 +5920,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_4", + // "cc_light_instr": "update_ph_step_4", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5494,7 +5928,7 @@ // ] // } // }, - // "update_ph_park_4 q8": { + // "update_ph_step_4 q8": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5515,7 +5949,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_4", + // "cc_light_instr": "update_ph_step_4", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5523,7 +5957,7 @@ // ] // } // }, - // "update_ph_park_4 q9": { + // "update_ph_step_4 q9": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5544,7 +5978,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_4", + // "cc_light_instr": "update_ph_step_4", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5552,7 +5986,7 
@@ // ] // } // }, - // "update_ph_park_4 q10": { + // "update_ph_step_4 q10": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5573,7 +6007,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_4", + // "cc_light_instr": "update_ph_step_4", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5581,7 +6015,7 @@ // ] // } // }, - // "update_ph_park_4 q11": { + // "update_ph_step_4 q11": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5602,7 +6036,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_4", + // "cc_light_instr": "update_ph_step_4", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5610,7 +6044,7 @@ // ] // } // }, - // "update_ph_park_4 q12": { + // "update_ph_step_4 q12": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5631,7 +6065,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_4", + // "cc_light_instr": "update_ph_step_4", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5639,7 +6073,7 @@ // ] // } // }, - // "update_ph_park_4 q13": { + // "update_ph_step_4 q13": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5660,7 +6094,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_4", + // "cc_light_instr": "update_ph_step_4", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5668,7 +6102,7 @@ // ] // } // }, - // "update_ph_park_4 q14": { + // "update_ph_step_4 q14": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5689,7 +6123,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_4", + // "cc_light_instr": "update_ph_step_4", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5697,7 +6131,7 @@ // ] // } // }, - // "update_ph_park_4 q15": { + // "update_ph_step_4 q15": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5718,7 +6152,7 @@ // ] // ], // 
"type": "mw", - // "cc_light_instr": "update_ph_park_4", + // "cc_light_instr": "update_ph_step_4", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5726,7 +6160,7 @@ // ] // } // }, - // "update_ph_park_4 q16": { + // "update_ph_step_4 q16": { // "duration": @MW_DURATION@, // "matrix": [ // [ @@ -5747,7 +6181,7 @@ // ] // ], // "type": "mw", - // "cc_light_instr": "update_ph_park_4", + // "cc_light_instr": "update_ph_step_4", // "cc": { // "ref_signal": "single-qubit-mw", // "static_codeword_override": [ @@ -5755,11 +6189,11 @@ // ] // } // }, - "update_ph_park_5 q0": { + "update_ph_step_5": { "duration": @MW_DURATION@, "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], "type": "mw", - "cc_light_instr": "update_ph_park_5", + "cc_light_instr": "update_ph_step_5", "cc": { "ref_signal": "single-qubit-mw", "static_codeword_override": [ @@ -5767,803 +6201,611 @@ ] } }, - "update_ph_park_5 q1": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_5", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 56 - ] - } - }, - "update_ph_park_5 q2": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_5", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 56 - ] - } - }, - "update_ph_park_5 q3": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_5", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 56 - ] - } - }, - "update_ph_park_5 q4": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_5", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 56 - ] 
- } - }, - "update_ph_park_5 q5": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_5", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 56 - ] - } - }, - "update_ph_park_5 q6": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_5", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 56 - ] - } - }, - "update_ph_park_5 q7": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_5", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 56 - ] - } - }, - "update_ph_park_5 q8": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_5", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 56 - ] - } - }, - "update_ph_park_5 q9": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_5", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 56 - ] - } - }, - "update_ph_park_5 q10": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_5", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 56 - ] - } - }, - "update_ph_park_5 q11": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_5", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 56 - ] - } - }, - "update_ph_park_5 q12": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 
0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_5", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 56 - ] - } - }, - "update_ph_park_5 q13": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_5", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 56 - ] - } - }, - "update_ph_park_5 q14": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_5", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 56 - ] - } - }, - "update_ph_park_5 q15": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_5", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 56 - ] - } - }, - "update_ph_park_5 q16": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_5", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 56 - ] - } - }, - "update_ph_park_6 q0": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_6", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 57 - ] - } - }, - "update_ph_park_6 q1": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_6", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 57 - ] - } - }, - "update_ph_park_6 q2": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_6", - "cc": { 
- "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 57 - ] - } - }, - "update_ph_park_6 q3": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_6", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 57 - ] - } - }, - "update_ph_park_6 q4": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_6", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 57 - ] - } - }, - "update_ph_park_6 q5": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_6", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 57 - ] - } - }, - "update_ph_park_6 q6": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_6", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 57 - ] - } - }, - "update_ph_park_6 q7": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_6", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 57 - ] - } - }, - "update_ph_park_6 q8": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_6", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 57 - ] - } - }, - "update_ph_park_6 q9": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_6", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 57 - ] - } - }, - 
"update_ph_park_6 q10": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_6", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 57 - ] - } - }, - "update_ph_park_6 q11": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_6", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 57 - ] - } - }, - "update_ph_park_6 q12": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_6", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 57 - ] - } - }, - "update_ph_park_6 q13": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_6", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 57 - ] - } - }, - "update_ph_park_6 q14": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_6", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 57 - ] - } - }, - "update_ph_park_6 q15": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_6", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 57 - ] - } - }, - "update_ph_park_6 q16": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_6", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 57 - ] - } - }, - "update_ph_park_7 q0": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], 
[1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_7", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 58 - ] - } - }, - "update_ph_park_7 q1": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_7", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 58 - ] - } - }, - "update_ph_park_7 q2": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_7", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 58 - ] - } - }, - "update_ph_park_7 q3": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_7", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 58 - ] - } - }, - "update_ph_park_7 q4": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_7", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 58 - ] - } - }, - "update_ph_park_7 q5": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_7", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 58 - ] - } - }, - "update_ph_park_7 q6": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_7", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 58 - ] - } - }, - "update_ph_park_7 q7": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_7", - "cc": { - 
"ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 58 - ] - } - }, - "update_ph_park_7 q8": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_7", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 58 - ] - } - }, - "update_ph_park_7 q9": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_7", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 58 - ] - } - }, - "update_ph_park_7 q10": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_7", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 58 - ] - } - }, - "update_ph_park_7 q11": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_7", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 58 - ] - } - }, - "update_ph_park_7 q12": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_7", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 58 - ] - } - }, - "update_ph_park_7 q13": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_7", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 58 - ] - } - }, - "update_ph_park_7 q14": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_7", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 58 - ] - } - }, - 
"update_ph_park_7 q15": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_7", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 58 - ] - } - }, - "update_ph_park_7 q16": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_7", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 58 - ] - } - }, - "update_ph_park_8 q0": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_8", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 59 - ] - } - }, - "update_ph_park_8 q1": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_8", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 59 - ] - } - }, - "update_ph_park_8 q2": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_8", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 59 - ] - } - }, - "update_ph_park_8 q3": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_8", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 59 - ] - } - }, - "update_ph_park_8 q4": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_8", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 59 - ] - } - }, - "update_ph_park_8 q5": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 
0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_8", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 59 - ] - } - }, - "update_ph_park_8 q6": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_8", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 59 - ] - } - }, - "update_ph_park_8 q7": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_8", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 59 - ] - } - }, - "update_ph_park_8 q8": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_8", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 59 - ] - } - }, - "update_ph_park_8 q9": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_8", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 59 - ] - } - }, - "update_ph_park_8 q10": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_8", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 59 - ] - } - }, - "update_ph_park_8 q11": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_8", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 59 - ] - } - }, - "update_ph_park_8 q12": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_8", - "cc": { - 
"ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 59 - ] - } - }, - "update_ph_park_8 q13": { - "duration": @MW_DURATION@, - "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], - "type": "mw", - "cc_light_instr": "update_ph_park_8", - "cc": { - "ref_signal": "single-qubit-mw", - "static_codeword_override": [ - 59 - ] - } - }, - "update_ph_park_8 q14": { + // "update_ph_step_5 q1": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_5", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 56 + // ] + // } + // }, + // "update_ph_step_5 q2": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_5", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 56 + // ] + // } + // }, + // "update_ph_step_5 q3": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_5", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 56 + // ] + // } + // }, + // "update_ph_step_5 q4": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_5", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 56 + // ] + // } + // }, + // "update_ph_step_5 q5": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_5", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 56 + // ] + // } + // }, + // "update_ph_step_5 q6": { + // "duration": @MW_DURATION@, + // "matrix": [ 
[0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_5", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 56 + // ] + // } + // }, + // "update_ph_step_5 q7": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_5", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 56 + // ] + // } + // }, + // "update_ph_step_5 q8": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_5", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 56 + // ] + // } + // }, + // "update_ph_step_5 q9": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_5", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 56 + // ] + // } + // }, + // "update_ph_step_5 q10": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_5", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 56 + // ] + // } + // }, + // "update_ph_step_5 q11": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_5", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 56 + // ] + // } + // }, + // "update_ph_step_5 q12": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_5", + // "cc": { + // "ref_signal": 
"single-qubit-mw", + // "static_codeword_override": [ + // 56 + // ] + // } + // }, + // "update_ph_step_5 q13": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_5", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 56 + // ] + // } + // }, + // "update_ph_step_5 q14": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_5", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 56 + // ] + // } + // }, + // "update_ph_step_5 q15": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_5", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 56 + // ] + // } + // }, + // "update_ph_step_5 q16": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_5", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 56 + // ] + // } + // }, + "update_ph_step_6": { "duration": @MW_DURATION@, "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], "type": "mw", - "cc_light_instr": "update_ph_park_8", + "cc_light_instr": "update_ph_step_6", "cc": { "ref_signal": "single-qubit-mw", "static_codeword_override": [ - 59 + 57 ] } }, - "update_ph_park_8 q15": { + // "update_ph_step_6 q1": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_6", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 57 + // ] + // } + // }, + // "update_ph_step_6 q2": { + // "duration": 
@MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_6", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 57 + // ] + // } + // }, + // "update_ph_step_6 q3": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_6", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 57 + // ] + // } + // }, + // "update_ph_step_6 q4": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_6", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 57 + // ] + // } + // }, + // "update_ph_step_6 q5": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_6", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 57 + // ] + // } + // }, + // "update_ph_step_6 q6": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_6", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 57 + // ] + // } + // }, + // "update_ph_step_6 q7": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_6", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 57 + // ] + // } + // }, + // "update_ph_step_6 q8": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_6", + // "cc": { 
+ // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 57 + // ] + // } + // }, + // "update_ph_step_6 q9": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_6", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 57 + // ] + // } + // }, + // "update_ph_step_6 q10": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_6", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 57 + // ] + // } + // }, + // "update_ph_step_6 q11": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_6", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 57 + // ] + // } + // }, + // "update_ph_step_6 q12": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_6", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 57 + // ] + // } + // }, + // "update_ph_step_6 q13": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_6", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 57 + // ] + // } + // }, + // "update_ph_step_6 q14": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_6", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 57 + // ] + // } + // }, + // "update_ph_step_6 q15": { + // "duration": 
@MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_6", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 57 + // ] + // } + // }, + // "update_ph_step_6 q16": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_6", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 57 + // ] + // } + // }, + "update_ph_step_7": { "duration": @MW_DURATION@, "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], "type": "mw", - "cc_light_instr": "update_ph_park_8", + "cc_light_instr": "update_ph_step_7", "cc": { "ref_signal": "single-qubit-mw", "static_codeword_override": [ - 59 + 58 ] } }, - "update_ph_park_8 q16": { + // "update_ph_step_7 q1": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_7", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 58 + // ] + // } + // }, + // "update_ph_step_7 q2": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_7", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 58 + // ] + // } + // }, + // "update_ph_step_7 q3": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_7", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 58 + // ] + // } + // }, + // "update_ph_step_7 q4": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": 
"update_ph_step_7", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 58 + // ] + // } + // }, + // "update_ph_step_7 q5": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_7", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 58 + // ] + // } + // }, + // "update_ph_step_7 q6": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_7", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 58 + // ] + // } + // }, + // "update_ph_step_7 q7": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_7", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 58 + // ] + // } + // }, + // "update_ph_step_7 q8": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_7", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 58 + // ] + // } + // }, + // "update_ph_step_7 q9": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_7", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 58 + // ] + // } + // }, + // "update_ph_step_7 q10": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_7", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 58 + // ] + // } + // }, + // 
"update_ph_step_7 q11": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_7", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 58 + // ] + // } + // }, + // "update_ph_step_7 q12": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_7", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 58 + // ] + // } + // }, + // "update_ph_step_7 q13": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_7", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 58 + // ] + // } + // }, + // "update_ph_step_7 q14": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_7", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 58 + // ] + // } + // }, + // "update_ph_step_7 q15": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_7", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 58 + // ] + // } + // }, + // "update_ph_step_7 q16": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_7", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 58 + // ] + // } + // }, + "update_ph_step_8": { "duration": @MW_DURATION@, "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], "type": "mw", - "cc_light_instr": 
"update_ph_park_8", + "cc_light_instr": "update_ph_step_8", "cc": { "ref_signal": "single-qubit-mw", "static_codeword_override": [ @@ -6571,6 +6813,198 @@ ] } }, + // "update_ph_step_8 q1": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_8", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 59 + // ] + // } + // }, + // "update_ph_step_8 q2": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_8", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 59 + // ] + // } + // }, + // "update_ph_step_8 q3": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_8", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 59 + // ] + // } + // }, + // "update_ph_step_8 q4": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_8", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 59 + // ] + // } + // }, + // "update_ph_step_8 q5": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_8", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 59 + // ] + // } + // }, + // "update_ph_step_8 q6": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_8", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 59 + // ] + // } + 
// }, + // "update_ph_step_8 q7": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_8", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 59 + // ] + // } + // }, + // "update_ph_step_8 q8": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_8", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 59 + // ] + // } + // }, + // "update_ph_step_8 q9": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_8", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 59 + // ] + // } + // }, + // "update_ph_step_8 q10": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_8", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 59 + // ] + // } + // }, + // "update_ph_step_8 q11": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_8", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 59 + // ] + // } + // }, + // "update_ph_step_8 q12": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_8", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 59 + // ] + // } + // }, + // "update_ph_step_8 q13": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", 
+ // "cc_light_instr": "update_ph_step_8", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 59 + // ] + // } + // }, + // "update_ph_step_8 q14": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_8", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 59 + // ] + // } + // }, + // "update_ph_step_8 q15": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_8", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 59 + // ] + // } + // }, + // "update_ph_step_8 q16": { + // "duration": @MW_DURATION@, + // "matrix": [ [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0] ], + // "type": "mw", + // "cc_light_instr": "update_ph_step_8", + // "cc": { + // "ref_signal": "single-qubit-mw", + // "static_codeword_override": [ + // 59 + // ] + // } + // }, // END OF AUTOMATICALLY GENERATED SECTION diff --git a/pycqed/measurement/openql_experiments/multi_qubit_oql.py b/pycqed/measurement/openql_experiments/multi_qubit_oql.py index 2da59f7511..3879662e0e 100644 --- a/pycqed/measurement/openql_experiments/multi_qubit_oql.py +++ b/pycqed/measurement/openql_experiments/multi_qubit_oql.py @@ -1,10 +1,10 @@ -from typing import List +from typing import List, Dict, Optional import numpy as np from pycqed.measurement.openql_experiments.openql_helpers import OqlProgram from pycqed.utilities.general import int2base from pycqed.instrument_drivers.meta_instrument.LutMans.flux_lutman import _def_lm as _def_lm_flux - +import itertools def single_flux_pulse_seq(qubit_indices: tuple, platf_cfg: str): p = OqlProgram("single_flux_pulse_seq", platf_cfg) @@ -22,7 +22,6 @@ def single_flux_pulse_seq(qubit_indices: tuple, platf_cfg: str): p.compile() return p - # FIXME: not 
really used def flux_staircase_seq(platf_cfg: str) -> OqlProgram: p = OqlProgram("flux_staircase_seq", platf_cfg) @@ -47,10 +46,8 @@ def multi_qubit_off_on( qubits: list, initialize: bool, second_excited_state: bool, - platf_cfg: str, - nr_flux_dance: int = None, - wait_time: float = None -) -> OqlProgram: + platf_cfg: str + ) -> OqlProgram: """ Performs an 'off_on' sequence on the qubits specified. off: (RO) - prepz - - - RO @@ -81,29 +78,16 @@ def multi_qubit_off_on( for i, comb in enumerate(combinations): k = p.create_kernel('Prep_{}'.format(comb)) - # 1. Prepare qubits in 0 for q in qubits: k.prepz(q) k.barrier([]) - # 2. post-selection extra init readout if initialize: for q in qubits: k.measure(q) k.barrier(qubits) - - if nr_flux_dance: - for i in range(int(nr_flux_dance)): - for step in [1, 2, 3, 4]: - # if refocusing: - # k.gate(f'flux-dance-{step}-refocus', [0]) - # else: - k.gate(f'flux-dance-{step}', [0]) - k.barrier([]) # alignment - k.gate("wait", [], wait_time) - - # 3. prepare desired state + # 3. prepare desired state for state, target_qubit in zip(comb, qubits): # N.B. last is LSQ if state == '0': k.gate('i', [target_qubit]) @@ -130,7 +114,7 @@ def single_qubit_off_on( qtarget, initialize: bool, platf_cfg: str -) -> OqlProgram: + ) -> OqlProgram: n_qubits = len(qubits) comb_0 = '0' * n_qubits comb_1 = comb_0[:qubits.index(qtarget)] + '1' + comb_0[qubits.index(qtarget) + 1:] @@ -179,7 +163,7 @@ def targeted_off_on( q_target: int, pulse_comb: str, platf_cfg: str -) -> OqlProgram: + ) -> OqlProgram: """ Performs an 'off_on' sequence on the qubits specified. 
off: prepz - - RO @@ -239,14 +223,15 @@ def targeted_off_on( return p + def Msmt_induced_dephasing_ramsey( q_rams: list, - q_meas: int, + q_meas: list, meas_time: int, platf_cfg: str, echo_times: list = None, - exception_qubits: list = None -) -> OqlProgram: + exception_qubits: list = None, + ) -> OqlProgram: """ q_target is ramseyed q_spec is measured @@ -261,34 +246,69 @@ def Msmt_induced_dephasing_ramsey( cw_idx = angle//20 + 9 k = p.create_kernel(f"Ramsey_meas_{meas}_{angle}_{state}") - for q in q_rams: + for q in q_rams+q_meas: k.prepz(q) - k.prepz(q_meas) k.barrier([]) if state == '1': - k.gate('rx180',[q_meas]) - for q in q_rams: + for q in q_meas: + k.gate('rx180',[q]) + for q in q_rams: k.gate('rx90', [q]) k.barrier([]) - if meas == True: - k.measure(q_meas) - else: - k.gate('wait', [q_meas], meas_time) + for q in q_meas: + if meas == True: + k.measure(q) + else: + k.gate('wait', [q], meas_time) if echo_times != None: - for q, t in zip(q_rams, echo_times): - k.gate('cw_30', [q]) - k.gate('wait', [q], t) - k.barrier([]) - - for q in q_rams: + for j, q in enumerate(q_rams): + if echo_times == 'XmX': + for cycle in range(int(meas_time/2/20)): + k.gate('rx180', [q]) + k.gate('rXm180', [q]) + elif echo_times == 'YmY': + for cycle in range(int(meas_time/2/20)): + k.gate('rY180', [q]) + k.gate('rYm180', [q]) + elif echo_times == 'XY': + for cycle in range(int(meas_time/6/20)): + k.gate('i', [q]) + k.gate('rx180', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('rY180', [q]) + k.gate('i', [q]) + elif echo_times == 'XYmYmX': + for cycle in range(int(meas_time/4/20)): + k.gate('rx180', [q]) + k.gate('rY180', [q]) + k.gate('rYm180', [q]) + k.gate('rXm180', [q]) + elif echo_times == 'XY8': + for cycle in range(int(meas_time/8/20)): + k.gate('rx180', [q]) + k.gate('rY180', [q]) + k.gate('rx180', [q]) + k.gate('rY180', [q]) + k.gate('rY180', [q]) + k.gate('rx180', [q]) + k.gate('rY180', [q]) + k.gate('rx180', [q]) + + else: + k.gate('cw_30', [q]) + k.gate('wait', 
[q], echo_times[j]) + k.barrier([]) + for q in q_rams: k.gate('cw_{:02}'.format(cw_idx), [q]) - k.gate("wait", [q], 600) # To prevent UHF from missing shots + k.gate("wait", [q], 1000) # To prevent UHF from missing shots k.measure(q) k.barrier([]) if meas == True and exception_qubits != None: for q in exception_qubits: + k.gate("wait", [q], 1000) # To prevent UHF from missing shots k.measure(q) p.add_kernel(k) @@ -304,7 +324,6 @@ def Msmt_induced_dephasing_ramsey( return p - def two_qubit_off_on(q0: int, q1: int, platf_cfg: str): ''' off_on sequence on two qubits. @@ -375,7 +394,7 @@ def two_qubit_AllXY( sequence_type='sequential', replace_q1_pulses_with: str = None, repetitions: int = 1 -) -> OqlProgram: + ) -> OqlProgram: """ AllXY sequence on two qubits. Has the option of replacing pulses on q1 with pi pulses @@ -479,7 +498,7 @@ def residual_coupling_sequence( q_spectator_idx: list, spectator_state: str, platf_cfg: str -) -> OqlProgram: + ) -> OqlProgram: """ Sequence to measure the residual (ZZ) interaction between two qubits. Procedure is described in M18TR. 
@@ -505,7 +524,7 @@ def residual_coupling_sequence( gate_spec = [s.replace('0', 'i').replace('1', 'rx180') for s in spectator_state] - for i, time in enumerate(times[:-4]): + for i, time in enumerate(times[:-2]): k = p.create_kernel("residual_coupling_seq_{}".format(i)) k.prepz(q0) @@ -513,29 +532,15 @@ def residual_coupling_sequence( k.prepz(q_s) wait_nanoseconds = int(round(time / 1e-9)) k.gate('rx90', [q0]) - - # wait + for i_s, q_s in enumerate(q_spectator_idx): + k.gate(gate_spec[i_s], [q_s]) k.gate("wait", all_qubits, wait_nanoseconds) - - # Echo pulse on ramsey qubit, apply selected gate again to cancel effect k.gate('rx180', [q0]) for i_s, q_s in enumerate(q_spectator_idx): k.gate(gate_spec[i_s], [q_s]) - - # wait k.gate("wait", all_qubits, wait_nanoseconds) - - # Transform ramsey qubit state to preferred basis - # angle = (i*40) % 360 - # cw_idx = angle//20 + 9 - # k.gate('cw_{:02}'.format(cw_idx), [q0]) - k.gate('ry90', [q0]) # k.gate('rxm90', [q0]) - for i_s, q_s in enumerate(q_spectator_idx): - k.gate(gate_spec[i_s], [q_s]) - k.gate('wait', [], 0) - - # Measure qubits + k.gate('ry90', [q0]) k.measure(q0) for q_s in q_spectator_idx: k.measure(q_s) @@ -556,7 +561,7 @@ def FluxTimingCalibration( platf_cfg: str, flux_cw: str = 'fl_cw_02', # FIXME: unused cal_points: bool = True -) -> OqlProgram: + ) -> OqlProgram: """ A Ramsey sequence with varying waiting times `times` around a flux pulse. """ @@ -580,7 +585,12 @@ def FluxTimingCalibration( p.add_kernel(k) if cal_points: - p.add_single_qubit_cal_points(qubit_idx=qubit_idx) # FIXME: unresolved, use multi iso single? 
+ cal_states = ['{:0{}b}'.format(i, len(qubit_idxs)) for i in range(2**len(qubit_idxs))] + p.add_multi_q_cal_points( + qubits=qubit_idxs, + combinations=cal_states + ) + # p.add_single_qubit_cal_points(qubit_idx=qubit_idx) # FIXME: unresolved p.compile() return p @@ -591,8 +601,9 @@ def Cryoscope( twoq_pair=[2, 0], platf_cfg: str = '', cc: str = 'CC', + wait_time_flux: int = 0, double_projections: bool = True -) -> OqlProgram: + ) -> OqlProgram: """ Single qubit Ramsey sequence. Writes output files to the directory specified in openql. @@ -609,24 +620,17 @@ def Cryoscope( p = OqlProgram("Cryoscope", platf_cfg) - # FIXME: the variables created here are effectively unused - if cc.upper() == 'CCL': - flux_target = twoq_pair - elif cc.upper() == 'QCC' or cc.upper() == 'CC': - cw_idx = int(flux_cw[-2:]) - flux_cw = 'sf_{}'.format(_def_lm_flux[cw_idx]['name'].lower()) - else: - raise ValueError('CC type not understood: {}'.format(cc)) - k = p.create_kernel("RamZ_X") k.prepz(qubit_idxs[0]) k.barrier([]) # alignment workaround for q_idx in qubit_idxs: k.gate('rx90', [q_idx]) + k.gate('wait', [], wait_time_flux) k.barrier([]) # alignment workaround for q_idx in qubit_idxs: - k.gate('sf_square', [q_idx]) + k.gate(flux_cw, [q_idx]) k.barrier([]) # alignment workaround + k.gate('wait', [], wait_time_flux) for q_idx in qubit_idxs: k.gate('rx90', [q_idx]) k.barrier([]) @@ -639,10 +643,12 @@ def Cryoscope( k.barrier([]) # alignment workaround for q_idx in qubit_idxs: k.gate('rx90', [q_idx]) + k.gate('wait', [], wait_time_flux) k.barrier([]) # alignment workaround for q_idx in qubit_idxs: - k.gate('sf_square', [q_idx]) + k.gate(flux_cw, [q_idx]) k.barrier([]) # alignment workaround + k.gate('wait', [], wait_time_flux) for q_idx in qubit_idxs: k.gate('ry90', [q_idx]) k.barrier([]) @@ -656,10 +662,12 @@ def Cryoscope( k.barrier([]) # alignment workaround for q_idx in qubit_idxs: k.gate('rx90', [q_idx]) + k.gate('wait', [], wait_time_flux) k.barrier([]) # alignment workaround for 
q_idx in qubit_idxs: - k.gate('sf_square', [q_idx]) + k.gate(flux_cw, [q_idx]) k.barrier([]) # alignment workaround + k.gate('wait', [], wait_time_flux) for q_idx in qubit_idxs: k.gate('rxm90', [q_idx]) k.barrier([]) @@ -672,10 +680,12 @@ def Cryoscope( k.barrier([]) # alignment workaround for q_idx in qubit_idxs: k.gate('rx90', [q_idx]) + k.gate('wait', [], wait_time_flux) k.barrier([]) # alignment workaround for q_idx in qubit_idxs: - k.gate('sf_square', [q_idx]) + k.gate(flux_cw, [q_idx]) k.barrier([]) # alignment workaround + k.gate('wait', [], wait_time_flux) for q_idx in qubit_idxs: k.gate('rym90', [q_idx]) k.barrier([]) @@ -687,6 +697,31 @@ def Cryoscope( return p +def Cryoscope_long( + qubit: int, + times_ns: list, + t_total_ns: int, + platf_cfg: str = None): + ''' + Implements sequence for Spectrocopy type cryoscope. + ''' + p = OqlProgram("Cryoscope_long", platf_cfg) + for t_ns in times_ns: + k = p.create_kernel(f"wait_time_{t_ns}ns") + # Preparation + k.prepz(qubit) + # In this routine, we want to trigger flux codeword 1 + # as of the time of writting this, points to fl_cw 01 + k.gate('sf_cz_ne', [qubit]) + k.gate("wait", [qubit], t_ns) + k.gate('ry180', [qubit]) + k.gate("wait", [qubit], t_total_ns-t_ns+1000) + k.measure(qubit) + p.add_kernel(k) + p.compile() + return p + + # FIXME: not really used def CryoscopeGoogle(qubit_idx: int, buffer_time1, times, platf_cfg: str) -> OqlProgram: """ @@ -733,7 +768,7 @@ def fluxed_ramsey( wait_time: float, flux_cw: str = 'fl_cw_02', platf_cfg: str = '' -) -> OqlProgram: + ) -> OqlProgram: """ Single qubit Ramsey sequence. Writes output files to the directory specified in openql. @@ -781,15 +816,13 @@ def fluxed_ramsey( # FIMXE: merge into the real chevron seq - - def Chevron_hack( qubit_idx: int, qubit_idx_spec, buffer_time, buffer_time2, platf_cfg: str -) -> OqlProgram: + ) -> OqlProgram: """ Single qubit Ramsey sequence. Writes output files to the directory specified in openql. 
@@ -829,14 +862,15 @@ def Chevron_hack( def Chevron( qubit_idx: int, qubit_idx_spec: int, - qubit_idx_parks: int, # FIXME: incorrect type + qubit_idx_parks: Optional[List[int]], # FIXME: incorrect type buffer_time, buffer_time2, flux_cw: int, platf_cfg: str, target_qubit_sequence: str = 'ramsey', cc: str = 'CCL', - recover_q_spec: bool = False -) -> OqlProgram: + recover_q_spec: bool = False, + second_excited_state: bool = False, + ) -> OqlProgram: """ Writes output files to the directory specified in openql. Output directory is set as an attribute to the program for convenience. @@ -853,6 +887,7 @@ def Chevron( or excite it iat the beginning of the sequnce ('excited') recover_q_spec (bool): applies the first gate of qspec at the end as well if `True` + second_excited_state (bool): Applies f12 transition pulse before flux pulse. Returns: p: OpenQL Program object containing @@ -867,6 +902,8 @@ def Chevron( q0 -x180-flux-x180-RO- qspec ----------------RO- (target_qubit_sequence='ground') + q0 -x180-x12-flux-x12-x180-RO- + qspec ------------------------RO- (second_excited_state=True) """ p = OqlProgram("Chevron", platf_cfg) @@ -893,7 +930,8 @@ def Chevron( k.gate(spec_gate, [qubit_idx_spec]) k.gate('rx180', [qubit_idx]) - + if second_excited_state: + k.gate('rx12', [qubit_idx]) if buffer_nanoseconds > 0: k.gate("wait", [qubit_idx], buffer_nanoseconds) @@ -918,9 +956,10 @@ def Chevron( if buffer_nanoseconds2 > 0: k.gate('wait', [qubit_idx], buffer_nanoseconds2) - + if second_excited_state: + k.gate('rx12', [qubit_idx]) k.gate('rx180', [qubit_idx]) - k.gate('rx180', [qubit_idx_spec]) + k.gate(spec_gate, [qubit_idx_spec]) if recover_q_spec: k.gate(spec_gate, [qubit_idx_spec]) @@ -941,7 +980,7 @@ def two_qubit_ramsey( qubit_idx_spec: int, platf_cfg: str, target_qubit_sequence: str = 'excited' -) -> OqlProgram: + ) -> OqlProgram: """ Writes output files to the directory specified in openql. Output directory is set as an attribute to the program for convenience. 
@@ -1010,7 +1049,7 @@ def two_qubit_tomo_bell( platf_cfg, wait_after_flux: float = None, flux_codeword: str = 'cz' -) -> OqlProgram: + ) -> OqlProgram: ''' Two qubit bell state tomography. @@ -1084,7 +1123,7 @@ def two_qubit_tomo_bell_by_waiting( q1, platf_cfg, wait_time: int = 20 -) -> OqlProgram: + ) -> OqlProgram: ''' Two qubit (bell) state tomography. There are no flux pulses applied, only waiting time. It is supposed to take advantage of residual ZZ to @@ -1144,116 +1183,6 @@ def two_qubit_tomo_bell_by_waiting( p.compile() return p - -# FIXME: not really used -def two_qubit_DJ(q0, q1, platf_cfg) -> OqlProgram: - ''' - Two qubit Deutsch-Josza. - - Args: - q0, q1 (str): names of the target qubits - ''' - - p = OqlProgram("two_qubit_DJ", platf_cfg) - - # experiments - # 1 - k = p.create_kernel("DJ1") - k.prepz(q0) # to ensure enough separation in timing - k.prepz(q1) # to ensure enough separation in timing - # prerotations - k.gate('ry90', [q0]) - k.gate('rym90', [q1]) - # post rotations - k.gate('ry90', [q0]) - k.gate('ry90', [q1]) - # measure - k.measure(q0) - k.measure(q1) - p.add_kernel(k) - - # 2 - k = p.create_kernel("DJ2") - k.prepz(q0) # to ensure enough separation in timing - k.prepz(q1) # to ensure enough separation in timing - # prerotations - k.gate('ry90', [q0]) - k.gate('rym90', [q1]) - # rotations - k.gate('rx180', [q1]) - # post rotations - k.gate('ry90', [q0]) - k.gate('ry90', [q1]) - # measure - k.measure(q0) - k.measure(q1) - p.add_kernel(k) - - # 3 - k = p.create_kernel("DJ3") - k.prepz(q0) # to ensure enough separation in timing - k.prepz(q1) # to ensure enough separation in timing - # prerotations - k.gate('ry90', [q0]) - k.gate('rym90', [q1]) - # rotations - k.gate('ry90', [q1]) - k.gate('rx180', [q0]) - k.gate('rx180', [q1]) - - # Hardcoded flux pulse, FIXME use actual CZ - k.barrier([]) # alignment workaround - k.gate('wait', [2, 0], 100) - k.gate('fl_cw_01', [2, 0]) - # FIXME hardcoded extra delays - k.gate('wait', [2, 0], 200) - 
k.barrier([]) # alignment workaround - - k.gate('rx180', [q0]) - k.gate('ry90', [q1]) - - # post rotations - k.gate('ry90', [q0]) - k.gate('ry90', [q1]) - # measure - k.measure(q0) - k.measure(q1) - p.add_kernel(k) - - # 4 - k = p.create_kernel("DJ4") - k.prepz(q0) # to ensure enough separation in timing - k.prepz(q1) # to ensure enough separation in timing - # prerotations - k.gate('ry90', [q0]) - k.gate('rym90', [q1]) - # rotations - k.gate('rym90', [q1]) - # Hardcoded flux pulse, FIXME use actual CZ - k.barrier([]) # alignment workaround - k.gate('wait', [2, 0], 100) - k.gate('fl_cw_01', [2, 0]) - # FIXME hardcoded extra delays - k.gate('wait', [2, 0], 200) - k.barrier([]) # alignment workaround - - k.gate('rx180', [q1]) - k.gate('rym90', [q1]) - - # post rotations - k.gate('ry90', [q0]) - k.gate('ry90', [q1]) - # measure - k.measure(q0) - k.measure(q1) - p.add_kernel(k) - - # 7 repetitions is because of assumptions in tomo analysis - p.add_two_q_cal_points(q0=q0, q1=q1, reps_per_cal_pt=7) - p.compile() - return p - - def single_qubit_parity_check( qD: int, qA: int, @@ -1263,7 +1192,7 @@ def single_qubit_parity_check( initial_states=['0', '1'], flux_codeword: str = 'cz', parity_axis='Z' -) -> OqlProgram: + ) -> OqlProgram: """ Implements a circuit for repeated parity checks. @@ -1350,7 +1279,7 @@ def two_qubit_parity_check( idling_time: float = 40e-9, idling_time_echo: float = 20e-9, idling_rounds: int = 0 -) -> OqlProgram: + ) -> OqlProgram: """ Implements a circuit for repeated parity checks on two qubits. @@ -1559,7 +1488,7 @@ def conditional_oscillation_seq( flux_codeword_park: str = None, parked_qubit_seq: str = 'ground', disable_parallel_single_q_gates: bool = False -) -> OqlProgram: + ) -> OqlProgram: ''' Sequence used to calibrate flux pulses for CZ gates. 
@@ -1606,6 +1535,8 @@ def conditional_oscillation_seq( k.barrier([]) # alignment workaround + k.gate('wait', [q0], 1000000) + k.barrier([]) # ################################################################# # Single qubit ** parallel ** gates before flux pulses # ################################################################# @@ -1743,7 +1674,6 @@ def conditional_oscillation_seq( qubits = [q0, q1] if q2 is None else [q0, q1, q2] p.add_multi_q_cal_points( qubits=qubits, - f_state_cal_pt_cw=31, combinations=states) p.compile() @@ -1862,6 +1792,11 @@ def conditional_oscillation_seq_multi( if flux_codeword == 'cz': for q0, q1 in zip(Q_idxs_target, Q_idxs_control): k.gate(flux_codeword, [q0, q1]) + elif flux_codeword == 'sf_square': + for q0, q1 in zip(Q_idxs_target, Q_idxs_control): + k.gate(flux_codeword, [q0]) + for q in Q_idxs_parked: + k.gate('sf_park', [q]) else: k.gate(flux_codeword, [0]) # k.gate('sf_cz_ne', [3]) @@ -1888,7 +1823,7 @@ def conditional_oscillation_seq_multi( if case == "excitation": for q_idx in Q_idxs_control: - k.gate("rx180", [q_idx]) + k.gate("rxm180", [q_idx]) # cw_idx corresponds to special hardcoded angles in the lutman # special because the cw phase pulses go in mult of 20 deg @@ -1929,7 +1864,6 @@ def conditional_oscillation_seq_multi( qubits = Q_idxs_target + Q_idxs_control p.add_multi_q_cal_points( qubits=qubits, - f_state_cal_pt_cw=31, combinations=states) p.compile() @@ -1945,6 +1879,89 @@ def conditional_oscillation_seq_multi( return p +def parity_check_ramsey( + Q_idxs_target, + Q_idxs_control, + control_cases, + flux_cw_list, + platf_cfg, + angles, + nr_spectators: int=0, + pc_repetitions: int=1, + wait_time_before_flux: int = 0, + wait_time_after_flux: int = 0 + ): + + p = OqlProgram("Parity_check_ramsey", platf_cfg) + + for case in control_cases: + for i, angle in enumerate(angles): + k = p.create_kernel("{}_{}".format(case, angle)) + # Preparation + for q in Q_idxs_target+Q_idxs_control: + k.prepz(q) + k.barrier([]) + # 
Single qubit gates + for j, state in enumerate(case): + if state == '1': + k.gate("rx180", [Q_idxs_control[j]]) + elif state == '2': + k.gate("rx180", [Q_idxs_control[j]]) + k.gate("rx12", [Q_idxs_control[j]]) + for q in Q_idxs_target: + k.gate("rx90", [q]) + k.barrier([]) # alignment workaround + # Flux pulses + k.gate('wait', [], wait_time_before_flux) + for j in range(pc_repetitions): + for l, flux_cw in enumerate(flux_cw_list): + if 'cz' in flux_cw: + if len(flux_cw_list) == len(Q_idxs_control)-nr_spectators: + k.gate(flux_cw, [Q_idxs_target[0], Q_idxs_control[l]]) + elif len(flux_cw_list) == len(Q_idxs_target): + k.gate(flux_cw, [Q_idxs_target[l], Q_idxs_control[0]]) + else: + raise('Flux cw list is not valid.') + else: + k.gate(flux_cw, [0]) + k.gate('wait', [], wait_time_after_flux) + k.barrier([]) + # Single qubit gates + for j, state in enumerate(case): + if state == '2': + k.gate("rx12", [Q_idxs_control[j]]) + k.gate("rxm180", [Q_idxs_control[j]]) + if state == '1': + k.gate("rxm180", [Q_idxs_control[j]]) + # cw_idx corresponds to special hardcoded angles in the lutman + # special because the cw phase pulses go in mult of 20 deg + cw_idx = angle // 20 + 9 + phi_gate = 'cw_{:02}'.format(cw_idx) + for q in Q_idxs_target: + k.gate(phi_gate, [q]) + k.barrier([]) + # k.gate('wait', [], 40) + + # Measurement + for q in Q_idxs_target+Q_idxs_control: + k.measure(q) + k.barrier([]) + p.add_kernel(k) + + qubits = Q_idxs_target + Q_idxs_control + cal_states = ['{:0{}b}'.format(i, len(qubits)) for i in range(2**len(qubits))] + p.add_multi_q_cal_points( + qubits=qubits, + combinations=cal_states + ) + p.compile() + + cal_pts_idx = np.arange(len(control_cases),len(cal_states)+len(control_cases)) + p.sweep_points = np.concatenate([np.repeat(np.arange(len(control_cases)), len(angles)), + cal_pts_idx]) + return p + + def parity_check_flux_dance( Q_idxs_target: List[int], Q_idxs_control: List[int], @@ -1952,7 +1969,10 @@ def parity_check_flux_dance( flux_cw_list: 
List[str], Q_idxs_ramsey: List[int] = None, Q_idxs_parking: List[int] = None, + disable_pc: bool = False, + disabled_pc_duration: int = 40, nr_flux_dance_before_cal_points: int = None, + pc_repetitions: int = 1, platf_cfg: str = None, angles: np.ndarray = np.arange(0, 360, 20), initialization_msmt: bool = False, @@ -2033,19 +2053,30 @@ def parity_check_flux_dance( # ################################################################# # Flux pulses # ################################################################# - # k.gate('wait', [], wait_time_before_flux) + k.gate('wait', [], wait_time_before_flux) - for flux_cw in flux_cw_list: - k.gate(flux_cw, [0]) + if not disable_pc: + for dummy_i in range(pc_repetitions): + for flux_cw in flux_cw_list: + k.gate(flux_cw, [0]) + else: + k.barrier([]) + k.gate('wait', [], disabled_pc_duration) + k.barrier([]) k.barrier([]) - # k.gate('wait', [], wait_time_after_flux) + k.gate('wait', [], wait_time_after_flux) # ################################################################# # Single qubit gates post flux pulses # ################################################################# + for qb in Q_idxs_target: + k.gate("update_ph_park_1", [qb]) + + k.barrier([]) + for i, indx in enumerate(case): if indx == '1': - k.gate("rxm180", [Q_idxs_control[i]]) + k.gate("rx180", [Q_idxs_control[i]]) # cw_idx corresponds to special hardcoded angles in the lutman # special because the cw phase pulses go in mult of 20 deg @@ -2092,7 +2123,6 @@ def parity_check_flux_dance( p.add_multi_q_cal_points( qubits=qubits if not Q_idxs_parking else qubits + Q_idxs_parking, - f_state_cal_pt_cw=31, combinations=cal_states, nr_flux_dance=nr_flux_dance_before_cal_points, flux_cw_list=flux_cw_list if nr_flux_dance_before_cal_points else None @@ -2111,7 +2141,7 @@ def parity_check_flux_dance( return p -def parity_check_fidelity( +def parity_check_fidelity_old( Q_idxs_target: List[str], Q_idxs_control: List[str], control_cases: List[str], @@ -2129,7 +2159,7 @@ 
def parity_check_fidelity( Pairs : contains all the gates gates with q0 is the target and q1 is the control. - parking qbs: includes all qubits to be parked. + parking qbs: includes all qubits to be parked. Timing of the sequence: q0: X90 -- C-Phase (repet. C-Phase) Rphi90 RO @@ -2140,7 +2170,7 @@ def parity_check_fidelity( Args: pairs : contains all the gates gates with q0 is the target and q1 is the control. - parking qbs: includes all qubits to be parked. + parking qbs: includes all qubits to be parked. flux_codeword (str): the gate to be applied to the qubit pair q0, q1 @@ -2156,9 +2186,9 @@ def parity_check_fidelity( p = OqlProgram("parity_check_fidelity", platf_cfg) for case in control_cases: - + # k = p.create_kernel("{}".format(case), p) k = p.create_kernel("{}".format(case)) - + # ################################################################# # State preparation # ################################################################# @@ -2168,7 +2198,7 @@ def parity_check_fidelity( if initialization_msmt: for qb in Q_idxs_target + Q_idxs_control: - k.measure(qb) + k.measure(qb) k.gate("wait", [], 0) for i, indx in enumerate(case): @@ -2194,6 +2224,11 @@ def parity_check_fidelity( k.barrier([]) k.gate('wait', [], wait_time_after_flux) + + for qb in Q_idxs_target: + k.gate("update_ph_park_1", [qb]) + + k.barrier([]) # ################################################################# # Single qubit gates post flux pulses # ################################################################# @@ -2222,176 +2257,316 @@ def parity_check_fidelity( return p -def Weight_4_parity_tomography( + +def parity_check_fidelity( + Q_ancilla_idx: List[str], + Q_control_idx: List[str], + control_cases: List[str], + flux_cw_list: List[str], + initialization_msmt: bool = False, + wait_time_before_flux: int = 0, + wait_time_after_flux: int = 0, + platf_cfg: str = None + ): + ''' + Performs parity check using ancilla and control qubits. 
+ ''' + p = OqlProgram("parity_check_fidelity", platf_cfg) + for case in control_cases: + k = p.create_kernel("{}".format(case)) + #################### + # State preparation + #################### + for q in Q_ancilla_idx + Q_control_idx: + k.prepz(q) + k.barrier([]) + if initialization_msmt: + for q in Q_ancilla_idx + Q_control_idx: + k.measure(q) + k.gate("wait", [], 0) + ##################### + # Single qubit gates + ##################### + for i, state in enumerate(case): + if state == '1': + k.gate("rx180", [Q_control_idx[i]]) + for q in Q_ancilla_idx: + k.gate("rym90", [q]) + k.barrier([]) + ############## + # Flux pulses + ############## + k.gate('wait', [], wait_time_before_flux) + for i, flux_cw in enumerate(flux_cw_list): + if 'cz' in flux_cw: + k.gate(flux_cw, [Q_ancilla_idx[0], Q_control_idx[i]]) + else: + k.gate(flux_cw, [0]) + k.gate('wait', [], wait_time_after_flux) + # k.barrier([]) + ##################### + # Single qubit gates + ##################### + for i, state in enumerate(case): + if state == '1': + k.gate("rxm180", [Q_control_idx[i]]) + for q_idx in Q_ancilla_idx: + k.gate("ry90", [q_idx]) + k.barrier([]) + ############## + # Measurement + ############## + for q in Q_ancilla_idx: + k.measure(q) + if initialization_msmt: + for q in Q_control_idx: + k.measure(q) + p.add_kernel(k) + # Add calibration points + Q_total = Q_ancilla_idx + if initialization_msmt: + Q_total += Q_control_idx + n = len(Q_total) + states = ['0','1'] + combinations = [''.join(s) for s in itertools.product(states, repeat=n)] + p.add_multi_q_cal_points(qubits=Q_total, + combinations=combinations, + reps_per_cal_pnt=1) + p.compile() + return p + + +def Weight_n_parity_tomography( Q_anc: int, - Q_D1: int, - Q_D2: int, - Q_D3: int, - Q_D4: int, + Q_D: list, platf_cfg: str, - simultaneous_measurement: bool=True + flux_cw_list: List[str], + Q_exception: List[str], + wait_time_before_flux: int = 0, + initialization_msmt: bool = False, + wait_time_after_flux: int = 0, + 
simultaneous_measurement: bool=True, + n_rounds=1, + readout_duration_ns: int = 340, + mw_duration_ns: int = 20, ): - p = OqlProgram("Weight_4_parity_tomography", platf_cfg) - all_Q_idxs = [Q_anc, Q_D1, Q_D2, Q_D3, Q_D4] + p = OqlProgram("Weight_n_parity_tomography", platf_cfg) + + if ((readout_duration_ns-mw_duration_ns)/2)%mw_duration_ns != 0: + print('Warning: Readout duration is not multiple of\ + Dynamical decoupling block sequence!') + + n = len(Q_D) + all_Q_idxs = [Q_anc] + Q_D tomo_gates = {'Z': 'i', 'X': 'rym90', 'Y': 'rx90'} - for op1, g1 in tomo_gates.items(): - for op2, g2 in tomo_gates.items(): - for op3, g3 in tomo_gates.items(): - for op4, g4 in tomo_gates.items(): + ops = ['Z','X','Y'] + Operators = [''.join(op) for op in itertools.product(ops, repeat=n)] + for op in Operators: + k = p.create_kernel(f'{op}') - k = oqh.create_kernel(f'Tomo_{op1+op2+op3+op4}', p) + for q in all_Q_idxs: + k.prepz(q) + k.barrier([]) - for q in all_Q_idxs: - k.prepz(q) - k.gate("ry90", [q]) + if initialization_msmt: + for q in all_Q_idxs: + k.measure(q) + k.gate("wait", [], 0) - k.gate("wait", [], 0) - k.gate("flux-dance-1-refocus", [0]) - k.gate("flux-dance-2-refocus", [0]) - k.gate("flux-dance-3-refocus", [0]) - k.gate("flux-dance-4-refocus", [0]) - k.gate("wait", [], 0) + for i in range(n_rounds-1): + for q in all_Q_idxs: + k.gate("ry90", [q]) + k.gate('wait', [], wait_time_before_flux) + for i, flux_cw in enumerate(flux_cw_list): + if 'cz' in flux_cw: + k.gate(flux_cw, [Q_anc, Q_D[i]]) + else: + k.gate(flux_cw, [0]) + k.gate('wait', [], wait_time_after_flux) + k.barrier([]) + for q in all_Q_idxs: + k.gate("rym90", [q]) + k.barrier([]) + k.measure(Q_anc) + for q in Q_D: + for cycle in range(int((readout_duration_ns-mw_duration_ns)/2/mw_duration_ns)): + k.gate('i', [q]) + k.gate('rx180', [q]) + for cycle in range(int((readout_duration_ns-mw_duration_ns)/2/mw_duration_ns)): + k.gate('i', [q]) + k.gate("wait", [], 0) - for q in all_Q_idxs: - k.gate("rym90", [q]) - 
k.gate("wait", [], 0) - - k.measure(Q_anc) - - if not simultaneous_measurement: - k.gate("cw_30", [2]) - k.gate('wait', [2], 360) - k.gate("cw_30", [0]) - k.gate('wait', [0], 380) - k.gate("cw_30", [13]) - k.gate('wait', [13], 280) - k.gate("cw_30", [16]) - k.gate('wait', [16], 320) - k.gate("wait", [], 0) - - for q, g in zip([Q_D1, Q_D2, Q_D3, Q_D4], [g1, g2, g3, g4]): - k.gate(g, [q]) - k.measure(q) - k.gate("wait", [], 0) + for q in all_Q_idxs: + k.gate("ry90", [q]) + + k.gate('wait', [], wait_time_before_flux) + for i, flux_cw in enumerate(flux_cw_list): + if 'cz' in flux_cw: + k.gate(flux_cw, [Q_anc, Q_D[i]]) + else: + k.gate(flux_cw, [0]) + k.gate('wait', [], wait_time_after_flux) + k.barrier([]) - if not simultaneous_measurement: - k.measure(Q_D4) + for q in all_Q_idxs: + k.gate("rym90", [q]) + k.gate("wait", [], 0) + + k.measure(Q_anc) + + if not simultaneous_measurement: + for q in Q_D: + for cycle in range(int((readout_duration_ns-mw_duration_ns)/2/mw_duration_ns)): + k.gate('i', [q]) + k.gate('rx180', [q]) + for cycle in range(int((readout_duration_ns-mw_duration_ns)/2/mw_duration_ns)): + k.gate('i', [q]) + k.gate("wait", [], 0) + + for q, g in zip(Q_D, [tomo_gates[o] for o in op]): + k.gate(g, [q]) + if not simultaneous_measurement: + k.gate("wait", [q], 200) + k.measure(q) + k.gate("wait", [], 0) + + # Fill up measurement register for exception qubits + if not simultaneous_measurement: + for q in Q_exception: + k.gate("wait", [q], 500) + k.measure(q) + for i in range(n_rounds-1): + for q in Q_exception: + k.gate("wait", [q], 500) + k.measure(q) - p.add_kernel(k) + p.add_kernel(k) # Calibration points - combinations = [ s1+s2+s3+s4+s5 for s1 in ['0', '1'] - for s2 in ['0', '1'] - for s3 in ['0', '1'] - for s4 in ['0', '1'] - for s5 in ['0', '1'] ] - oqh.add_multi_q_cal_points(p, - qubits=[Q_anc, Q_D1, Q_D2, Q_D3, Q_D4], - combinations=combinations, - reps_per_cal_pnt=1) - p = oqh.compile(p) + states = ['0','1'] + combinations = [''.join(s) for s in 
itertools.product(states, repeat=n+1)] + p.add_multi_q_cal_points(qubits=all_Q_idxs, + combinations=combinations, + reps_per_cal_pnt=1) + p.compile() return p + def Parity_Sandia_benchmark( qA: int, QDs: list, + flux_cw_list: List[str], + wait_time_before_flux: int = 0, + wait_time_after_flux: int = 0, platf_cfg: str = None): - ''' - Sandia's weight-4 parity check benchmark protocol. - ''' - delays = {} - p = OqlProgram("Sandia_parity_benchmark", platf_cfg) - - # lb = ["P_0000","P_1111","Single_parity_check","Double_parity_check"] - # for i,ks in enumerate(lb): - - k = p.create_kernel("P_0000") - all_q_idxs = QDs+[qA] - for q_idx in all_q_idxs: - k.prepz(q_idx) - k.measure(q_idx) - p.add_kernel(k) - - k = p.create_kernel("P_1111") - all_q_idxs = QDs+[qA] - for q_idx in all_q_idxs: - k.prepz(q_idx) - k.gate("rx180", [q_idx]) - k.measure(q_idx) - p.add_kernel(k) - - k = p.create_kernel("Single_parity_check") - all_q_idxs = QDs+[qA] - for q_idx in all_q_idxs: - k.prepz(q_idx) - k.gate("ry90", [q_idx]) - k.barrier([]) - k.gate("flux_dance_refocus_1", [0]) - k.gate("flux_dance_refocus_2", [0]) - k.gate("flux_dance_refocus_3", [0]) - k.gate("flux_dance_refocus_4", [0]) - k.barrier([]) - for q_idx in all_q_idxs: - k.gate("rym90", [q_idx]) - k.measure(q_idx) - p.add_kernel(k) + ''' + Sandia's weight-4 parity check benchmark protocol. 
+ ''' + delays = {} + p = OqlProgram("Sandia_parity_benchmark", platf_cfg) + # lb = ["P_0000","P_1111","Single_parity_check","Double_parity_check"] + # for i,ks in enumerate(lb): - k = p.create_kernel("Double_parity_check") - all_q_idxs = QDs+[qA] - for q_idx in all_q_idxs: - k.prepz(q_idx) - k.gate("ry90", [q_idx]) - k.barrier([]) - k.gate("flux_dance_refocus_1", [0]) - k.gate("flux_dance_refocus_2", [0]) - k.gate("flux_dance_refocus_3", [0]) - k.gate("flux_dance_refocus_4", [0]) - k.barrier([]) - for q_idx in all_q_idxs: - k.gate("rym90", [q_idx]) - k.barrier([]) - k.measure(qA) - - # correct for msmt induced phaseshift on data qubits using phi-echo pulses - k.gate("cw_30", [2]) - k.gate('wait', [2], 360) - k.gate("cw_30", [0]) - k.gate('wait', [0], 380) - k.gate("cw_30", [13]) - k.gate('wait', [13], 280) - k.gate("cw_30", [16]) - k.gate('wait', [16], 320) - k.barrier([]) - - for q_idx in all_q_idxs: - k.gate("ry90", [q_idx]) - k.barrier([]) - k.gate("flux_dance_refocus_1", [0]) - k.gate("flux_dance_refocus_2", [0]) - k.gate("flux_dance_refocus_3", [0]) - k.gate("flux_dance_refocus_4", [0]) - k.barrier([]) - for q_idx in all_q_idxs: - k.gate("rym90", [q_idx]) - k.measure(q_idx) - p.add_kernel(k) + k = p.create_kernel("P_0000") + all_q_idxs = QDs+[qA] + for q_idx in all_q_idxs: + k.prepz(q_idx) + k.measure(q_idx) + p.add_kernel(k) - p.compile() + k = p.create_kernel("P_1111") + all_q_idxs = QDs+[qA] + for q_idx in all_q_idxs: + k.prepz(q_idx) + k.gate("rx180", [q_idx]) + k.measure(q_idx) + p.add_kernel(k) - return p + k = p.create_kernel("Single_parity_check") + all_q_idxs = QDs+[qA] + for q_idx in all_q_idxs: + k.prepz(q_idx) + k.gate("ry90", [q_idx]) + k.barrier([]) + # k.gate("flux_dance_refocus_1", [0]) + # k.gate("flux_dance_refocus_2", [0]) + # k.gate("flux_dance_refocus_3", [0]) + # k.gate("flux_dance_refocus_4", [0]) + k.gate('wait', [], wait_time_before_flux) + for flux_cw in flux_cw_list: + k.gate(flux_cw, [0]) + k.gate('wait', [], wait_time_after_flux) 
+ k.barrier([]) + for q_idx in all_q_idxs: + k.gate("rym90", [q_idx]) + k.measure(q_idx) + p.add_kernel(k) -# FIXME: not really used, and partly uses hardcoded qubits -def grovers_two_qubit_all_inputs( - q0: int, q1: int, - platf_cfg: str, - precompiled_flux: bool = True, + + k = p.create_kernel("Double_parity_check") + all_q_idxs = QDs+[qA] + for q_idx in all_q_idxs: + k.prepz(q_idx) + k.gate("ry90", [q_idx]) + k.barrier([]) + # k.gate("flux_dance_refocus_1", [0]) + # k.gate("flux_dance_refocus_2", [0]) + # k.gate("flux_dance_refocus_3", [0]) + # k.gate("flux_dance_refocus_4", [0]) + k.gate('wait', [], wait_time_before_flux) + for flux_cw in flux_cw_list: + k.gate(flux_cw, [0]) + k.gate('wait', [], wait_time_after_flux) + k.barrier([]) + for q_idx in all_q_idxs: + k.gate("rym90", [q_idx]) + k.barrier([]) + k.measure(qA) + + # correct for msmt induced phaseshift on data qubits using phi-echo pulses + for q in QDs: + for cycle in range(int(720/6/20)): + k.gate('i', [q]) + k.gate('rx180', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('rY180', [q]) + k.gate('i', [q]) + k.barrier([]) + + for q_idx in all_q_idxs: + k.gate("ry90", [q_idx]) + k.barrier([]) + # k.gate("flux_dance_refocus_1", [0]) + # k.gate("flux_dance_refocus_2", [0]) + # k.gate("flux_dance_refocus_3", [0]) + # k.gate("flux_dance_refocus_4", [0]) + k.gate('wait', [], wait_time_before_flux) + for flux_cw in flux_cw_list: + k.gate(flux_cw, [0]) + k.gate('wait', [], wait_time_after_flux) + k.barrier([]) + for q_idx in all_q_idxs: + k.gate("rym90", [q_idx]) + k.measure(q_idx) + p.add_kernel(k) + + p.compile() + + return p + +# FIXME: not really used, and partly uses hardcoded qubits +def grovers_two_qubit_all_inputs( + q0: int, q1: int, + platf_cfg: str, + precompiled_flux: bool = True, second_CZ_delay: int = 0, CZ_duration: int = 260, add_echo_pulses: bool = False, cal_points: bool = True -) -> OqlProgram: + ) -> OqlProgram: """ Writes the QASM sequence for Grover's algorithm on two qubits. 
Sequence: @@ -2468,7 +2643,7 @@ def grovers_two_qubits_repeated( qubits, platf_cfg: str, nr_of_grover_iterations: int -) -> OqlProgram: + ) -> OqlProgram: """ Writes the QASM sequence for Grover's algorithm on two qubits. Sequence: @@ -2546,7 +2721,7 @@ def grovers_tomography( second_CZ_delay: int = 260, CZ_duration: int = 260, add_echo_pulses: bool = False -) -> OqlProgram: + ) -> OqlProgram: """ Tomography sequence for Grover's algorithm. @@ -2629,7 +2804,7 @@ def CZ_poisoned_purity_seq( platf_cfg: str, nr_of_repeated_gates: int, cal_points: bool = True -) -> OqlProgram: + ) -> OqlProgram: """ Creates the |00> + |11> Bell state and does a partial tomography in order to determine the purity of both qubits. @@ -2695,7 +2870,7 @@ def Chevron_first_manifold( buffer_time2, flux_cw: int, platf_cfg: str -) -> OqlProgram: + ) -> OqlProgram: """ Writes output files to the directory specified in openql. Output directory is set as an attribute to the program for convenience. @@ -2744,7 +2919,7 @@ def partial_tomography_cardinal( # second_CZ_delay: int = 260, # CZ_duration: int = 260, # add_echo_pulses: bool = False -) -> OqlProgram: + ) -> OqlProgram: """ Tomography sequence for Grover's algorithm. @@ -2856,7 +3031,7 @@ def sliding_flux_pulses_seq( flux_codeword_b: str = 'fl_cw_01', ramsey_axis: str = 'x', add_cal_points: bool = True -) -> OqlProgram: + ) -> OqlProgram: """ Experiment to measure effect flux pulses on each other. 
@@ -2941,7 +3116,7 @@ def two_qubit_state_tomography( platf_cfg, wait_after_flux: float = None, flux_codeword: str = 'cz' -) -> OqlProgram: + ) -> OqlProgram: p = OqlProgram("state_tomography_2Q_{}_{}_{}".format(product_state, qubit_idxs[0], qubit_idxs[1]), platf_cfg) q0 = qubit_idxs[0] @@ -3129,7 +3304,7 @@ def two_qubit_Depletion( time: float, sequence_type='sequential', double_points: bool = False -) -> OqlProgram: + ) -> OqlProgram: """ """ @@ -3231,7 +3406,7 @@ def Two_qubit_RTE( ramsey_time_1: int = 120, ramsey_time_2: int = 120, echo: bool = False -) -> OqlProgram: + ) -> OqlProgram: """ """ @@ -3302,7 +3477,7 @@ def Two_qubit_RTE_pipelined( start_states: list = ['0'], ramsey_time: int = 120, echo: bool = False -) -> OqlProgram: + ) -> OqlProgram: """ """ @@ -3366,13 +3541,13 @@ def Ramsey_cross( echo: bool, platf_cfg: str, initial_state: str = '0' -) -> OqlProgram: + ) -> OqlProgram: """ q_target is ramseyed q_spec is measured """ - p = OqlProgram("Ramsey_msmt_induced_dephasing", platf_cfg) # FIXME: duplicate name, does not match function name + p = OqlProgram("Ramsey_msmt_induced_dephasing", platf_cfg) for i, angle in enumerate(angles[:-4]): cw_idx = angle // 20 + 9 @@ -3418,56 +3593,6 @@ def Ramsey_cross( return p -def TEST_RTE(QX: int, QZ: int, platf_cfg: str, measurements: int) -> OqlProgram: - """ - - """ - p = OqlProgram('Multi_RTE', platf_cfg) - - k = p.create_kernel('Multi_RTE') - k.prepz(QX) - k.prepz(QZ) - ###################### - # Parity check - ###################### - for m in range(measurements): - # Superposition - k.gate('ry90', [QX]) - k.gate('i', [QZ]) - # CZ emulation - k.gate('i', [QZ, QX]) - k.gate('i', [QZ, QX]) - k.gate('i', [QZ, QX]) - # CZ emulation - k.gate('i', [QZ, QX]) - k.gate('i', [QZ, QX]) - k.gate('i', [QZ, QX]) - # intermidate sequential - k.gate('rym90', [QX]) - k.gate('i', [QZ]) - k.gate('i', [QX]) - k.gate('ry90', [QZ]) - # CZ emulation - k.gate('i', [QZ, QX]) - k.gate('i', [QZ, QX]) - k.gate('i', [QZ, QX]) - # CZ 
emulation - k.gate('i', [QZ, QX]) - k.gate('i', [QZ, QX]) - k.gate('i', [QZ, QX]) - # Recovery pulse - k.gate('i', [QX]) - k.gate('rym90', [QZ]) - # Measurement - k.measure(QX) - k.measure(QZ) - - p.add_kernel(k) - - p.compile() - return p - - def multi_qubit_AllXY(qubits_idx: list, platf_cfg: str, double_points: bool = True) -> OqlProgram: """ Used for AllXY measurement and calibration for multiple qubits simultaneously. @@ -3513,7 +3638,6 @@ def multi_qubit_AllXY(qubits_idx: list, platf_cfg: str, double_points: bool = Tr p.compile() return p - # FIXME: indentation is wrong in functions below def multi_qubit_rabi(qubits_idx: list, platf_cfg: str = None) -> OqlProgram: p = OqlProgram("Multi_qubit_rabi", platf_cfg) @@ -3610,7 +3734,7 @@ def multi_qubit_flipping( cal_points: bool = True, ax: str = 'x', angle: str = '180' -) -> OqlProgram: + ) -> OqlProgram: n_qubits = len(qubits_idx) if cal_points: @@ -3673,66 +3797,13 @@ def multi_qubit_motzoi(qubits_idx: list, platf_cfg: str = None) -> OqlProgram: return p -# def Ramsey_tomo(qR: int, -# qC: int, -# exc_specs: list, -# platf_cfg: str): -# """ -# Performs single qubit tomography on a qubit in the equator. 
-# """ - -# p = OqlProgram('single_qubit_tomo', platf_cfg) - -# Tomo_bases = ['Z', 'X', 'Y'] -# Tomo_gates = ['I', 'rym90', 'rx90'] - -# for i in range(2): -# for basis, gate in zip(Tomo_bases, Tomo_gates): -# k = oqh.create_kernel('Tomo_{}_off_{}'.format(basis, i), p) -# k.prepz(qR) -# k.prepz(qC) -# for qS in exc_specs: -# k.gate('rx180', [qS]) -# k.gate('ry90', [qR]) -# k.gate('cz', [qR, qC], 60) -# k.gate('wait', [qR, qC], 0) -# k.gate(gate, [qR]) -# k.measure(qR) -# k.measure(qC) - -# p.add_kernel(k) - -# k = oqh.create_kernel('Tomo_{}_on_{}'.format(basis, i), p) -# k.prepz(qR) -# k.prepz(qC) -# for qS in exc_specs: -# k.gate('rx180', [qS]) -# k.gate('ry90', [qR]) -# k.gate('ry180', [qC]) -# k.gate('cz', [qR, qC], 60) -# k.gate('wait', [qR, qC], 0) -# k.gate(gate, [qR]) -# k.gate('ry180', [qC]) -# k.measure(qR) -# k.measure(qC) - -# p.add_kernel(k) - -# oqh.add_multi_q_cal_points(p, -# qubits=[qR, qC], -# combinations=['00', '10', '20', '01']) - -# p.compile() -# return p - - def Ramsey_tomo( qR: list, qC: list, exc_specs: list, platf_cfg: str, flux_codeword: str = 'cz' -) -> OqlProgram: + ) -> OqlProgram: """ Performs single qubit tomography on a qubit in the equator. """ @@ -3811,3 +3882,1269 @@ def Ramsey_tomo( p.compile() return p + + +def gate_process_tomograhpy( + meas_qubit_idx: int, + gate_qubit_idx: list, + gate_name: str, + gate_duration_ns: int, + platf_cfg: str, + wait_after_gate_ns: int = 0): + ''' + Process tomography on a qubit () while performing + gates on other qubits (). 
+ ''' + _gate_duration_ns = gate_duration_ns+wait_after_gate_ns + if _gate_duration_ns%20 > 0: + _gate_duration_ns = ( _gate_duration_ns//20)*20 + 20 + states = {'0': 'i', + '1': 'rx180', + 'p': 'ry90', + 'm': 'rym90', + 'pi': 'rxm90', + 'mi': 'rx90'} + meas_bases = {'Z':'i', + 'X':'rym90', + 'Y':'rx90'} + p = OqlProgram('Gate_process_tomo', platf_cfg) + # Idle tomography + for state in states.keys(): + for basis in meas_bases.keys(): + k = p.create_kernel(f'state_{state}_tomo_{basis}_idle') + # State preparation + k.prepz(meas_qubit_idx) + # k.prepz(gate_qubit_idx) + k.gate(states[state], [meas_qubit_idx]) + # Idle for gate duration + k.gate('wait', [], _gate_duration_ns) + # Measurement in basis + k.gate(meas_bases[basis], [meas_qubit_idx]) + k.measure(meas_qubit_idx) + p.add_kernel(k) + # Gate process tomography + for state in states.keys(): + for basis in meas_bases.keys(): + k = p.create_kernel(f'state_{state}_tomo_{basis}_gate') + # State preparation + k.prepz(meas_qubit_idx) + # k.prepz(gate_qubit_idx) + k.gate(states[state], [meas_qubit_idx]) + # Play gate + k.gate('wait', []) + for q in gate_qubit_idx: + k.gate(gate_name, [q]) + k.gate('wait', [], wait_after_gate_ns) + # Measurement in basis + k.gate(meas_bases[basis], [meas_qubit_idx]) + k.measure(meas_qubit_idx) + p.add_kernel(k) + # Calibration_points + k = p.create_kernel("cal_0") + k.prepz(meas_qubit_idx) + k.measure(meas_qubit_idx) + p.add_kernel(k) + k = p.create_kernel("cal_1") + k.prepz(meas_qubit_idx) + k.gate('rx180', [meas_qubit_idx]) + k.measure(meas_qubit_idx) + p.add_kernel(k) + k = p.create_kernel("cal_2") + k.prepz(meas_qubit_idx) + k.gate('rx180', [meas_qubit_idx]) + k.gate('rx12', [meas_qubit_idx]) + k.measure(meas_qubit_idx) + p.add_kernel(k) + # Compile + p.compile() + return p + +def repeated_CZ_experiment( + qubit_idxs: list, + rounds: int, + gate_time_ns: int, + flux_codeword: str, + heralded_init: bool, + platf_cfg: str) -> OqlProgram: + + p = OqlProgram('repeated_CZ_experiment', 
platf_cfg) + # Main experiment + k = p.create_kernel("Gate_experiment") + k.prepz(qubit_idxs[0]) + if heralded_init: + for qb in qubit_idxs: + k.measure(qb) + for i in range(rounds): + for qb in qubit_idxs: + k.gate('rx90', [qb]) + k.gate("wait", []) + k.gate(flux_codeword, [qubit_idxs[0], qubit_idxs[1]]) + k.gate("wait", []) + for qb in qubit_idxs: + k.measure(qb) + k.gate("wait", []) + + + p.add_kernel(k) + # Reference experiment + k = p.create_kernel("Reference_experiment") + k.prepz(qubit_idxs[0]) + if heralded_init: + for qb in qubit_idxs: + k.measure(qb) + for i in range(rounds): + for qb in qubit_idxs: + k.gate('rx90', [qb]) + k.gate("wait", [], gate_time_ns) + for qb in qubit_idxs: + k.measure(qb) + k.gate("wait", []) + p.add_kernel(k) + # Calibration_points + states = ['0','1', '2'] + combinations = [''.join(s) for s in itertools.product(states, repeat=2)] + p.add_multi_q_cal_points(qubits=qubit_idxs, + combinations=combinations, + reps_per_cal_pnt=1) + # Compile + p.compile() + return p + + +def MUX_RO_sequence( + qubit_idxs: list, + heralded_init: bool, + platf_cfg: str, + states: list = ['0', '1']) -> OqlProgram: + p = OqlProgram('MUX_SSRO_experiment', platf_cfg) + n_qubits = len(qubit_idxs) + combinations = [''.join(s) for s in itertools.product(states, repeat=n_qubits)] + p.add_multi_q_cal_points(qubits=qubit_idxs, + combinations=combinations, + reps_per_cal_pnt=1, + heralded_init=heralded_init) + # Compile + p.compile() + return p + + +############################### +# Surface-17 specific routines +############################### +def repeated_stabilizer_data_measurement_sequence( + target_stab:str, + Q_anc: int, + Q_D: list, + X_anci_idxs:list, + Z_anci_idxs:list, + data_idxs:list, + lru_idxs:list, + platf_cfg: str, + Rounds: list, + experiments: list, + stabilizer_type: str = 'X', + initial_state_qubits: list = [], + measurement_time_ns: int = 500, + ): + p = OqlProgram("Repeated_stabilizer_seq", platf_cfg) + Valid_experiments = 
['single_stabilizer', 'single_stabilizer_LRU', + 'surface_13', 'surface_13_LRU', 'surface_17', + 'repetition_code'] + for exp in experiments: + assert exp in Valid_experiments, f'Experiment {exp} not a valid experiment' + assert stabilizer_type in ['X', 'Z'], '"stabilizer_type" must be "X" or "Z"' + # Data qubit idx dictionary + data_qubit_map = { "D1": 6, "D2": 2, "D3": 0, + "D4": 15, "D5": 13, "D7": 1, + "D6": 16, "D8": 5, "D9": 4 } + ancilla_qubit_map = {"Z1": 7, "Z2": 14, "Z3": 12, "Z4": 10, + "X1": 11, "X2": 8, "X3": 3, "X4": 9} + if initial_state_qubits: + for q_name in initial_state_qubits: + assert q_name in data_qubit_map.keys(), f'qubit {q_name} not a valid qubit.' + # remove X4 + X_anci_idxs.remove(9) + _remaining_ancillas = X_anci_idxs + Z_anci_idxs + _remaining_ancillas.remove(Q_anc) + + n = len(Q_D) + all_Q_idxs = [Q_anc] + Q_D + for n_rounds in Rounds: + + if 'single_stabilizer' in experiments: + k = p.create_kernel(f'Single_stabilizer_seq_{n_rounds}rounds') + # Preparation & heralded_init + for q in data_idxs: + k.prepz(q) + k.measure(q) + for q in _remaining_ancillas+[Q_anc]: + k.measure(q) + k.gate('wait', [], 400) + if initial_state_qubits: + if stabilizer_type == 'Z': + for q_name in initial_state_qubits: + k.gate('rx180', [data_qubit_map[q_name]]) + elif stabilizer_type == 'X': + if q_name in data_qubit_map.keys(): + for q_name in initial_state_qubits: + k.gate('ry90', [data_qubit_map[q_name]]) + else: + k.gate('rym90', [data_qubit_map[q_name]]) + else: + raise ValueError('Only Z and X type allowed.') + k.barrier([]) + # QEC Rounds + for i in range(n_rounds): + # First Pi/2 pulse + if stabilizer_type == 'X': + for q in all_Q_idxs: + k.gate("ry90", [q]) + else: + for q in [Q_anc]: + k.gate("ry90", [q]) + k.barrier([]) + # Flux dance + k.gate('wait', [], 0) + if 'Z' == target_stab[:1]: + ## Z Flux dances + k.gate(f'flux_dance_5', [0]) + k.gate(f'flux_dance_6', [0]) + k.gate(f'flux_dance_7', [0]) + k.gate(f'flux_dance_8', [0]) + else: + ## X Flux 
dances + k.gate(f'flux_dance_1', [0]) + k.gate(f'flux_dance_2', [0]) + k.gate(f'flux_dance_3', [0]) + k.gate(f'flux_dance_4', [0]) + k.gate('wait', [], 0) + # Second Pi/2 pulse + if stabilizer_type == 'X': + for q in all_Q_idxs: + k.gate("rym90", [q]) + else: + for q in [Q_anc]: + k.gate("rym90", [q]) + k.barrier([]) + # Measurement of ancillas + k.measure(Q_anc) + for q in _remaining_ancillas: + k.measure(q) + if i == n_rounds-1: + for q in Q_D: + if stabilizer_type == 'X': + k.gate('rym90', [q]) + k.measure(q) + else: + for q in Q_D: + # Measurement Echo + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('rx180', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate("wait", [], 0) + p.add_kernel(k) + + if 'single_stabilizer_LRU' in experiments: + k = p.create_kernel(f'Single_stabilizer_LRU_seq_{n_rounds}rounds') + # Preparation & heralded_init + for q in data_idxs: + k.prepz(q) + k.measure(q) + for q in _remaining_ancillas+[Q_anc]: + k.measure(q) + k.gate('wait', [], 400) + if initial_state_qubits: + if stabilizer_type == 'Z': + for q_name in initial_state_qubits: + k.gate('rx180', [data_qubit_map[q_name]]) + elif stabilizer_type == 'X': + if q_name in data_qubit_map.keys(): + for q_name in initial_state_qubits: + k.gate('ry90', [data_qubit_map[q_name]]) + else: + k.gate('rym90', [data_qubit_map[q_name]]) + else: + raise ValueError('Only Z and X type allowed.') + k.barrier([]) + # QEC Rounds + for i in range(n_rounds): + # First Pi/2 pulse + if stabilizer_type == 'X': + for q in all_Q_idxs: + k.gate("ry90", [q]) + else: + for q in [Q_anc]: + k.gate("ry90", [q]) + k.barrier([]) + # Flux dance + 
k.gate('wait', [], 0) + if 'Z' == target_stab[:1]: + ## Z Flux dances + k.gate(f'flux_dance_5', [0]) + k.gate(f'flux_dance_6', [0]) + k.gate(f'flux_dance_7', [0]) + k.gate(f'flux_dance_8', [0]) + else: + ## X Flux dances + k.gate(f'flux_dance_1', [0]) + k.gate(f'flux_dance_2', [0]) + k.gate(f'flux_dance_3', [0]) + k.gate(f'flux_dance_4', [0]) + k.gate('wait', [], 0) + # Second Pi/2 pulse + if stabilizer_type == 'X': + for q in all_Q_idxs: + k.gate("rym90", [q]) + else: + for q in [Q_anc]: + k.gate("rym90", [q]) + k.barrier([]) + # Measurement of ancillas + k.measure(Q_anc) + for q in _remaining_ancillas: + k.measure(q) + if i == n_rounds-1: + for q in Q_D: + if stabilizer_type == 'X': + k.gate('rym90', [q]) + k.measure(q) + else: + k.barrier([Q_anc, 20, 21, 22, 23, 24, 25]+_remaining_ancillas) + for q in lru_idxs: + k.gate('lru', [q]) + # LRU ancillas + # k.gate('lru', [3]) # X3 + # k.gate('lru', [12]) # Z3 + # k.gate('lru', [10]) # Z4 + # k.gate('lru', [9]) # X4 + # k.gate('lru', [7]) # Z1 + # k.gate('lru', [14]) # Z2 + # LRU Data qubits + # k.gate('lru', [13]) # D5 + # k.gate('lru', [15]) # D4 + # k.gate('lru', [16]) # D6 + k.gate('wait', [20, 21, 22, 23, 24, 25], 180) + k.gate('wait', [17, 18, 19], 200) + for q in Q_D: + # if q in Q_D: + # Measurement Echo + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('rx180', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + # LRU Echo + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('update_ph_LRU', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + # 
else: + # # Measurement Echo + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('rx180', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # # LRU Echo + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('rx180', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + k.gate("wait", [], 0) + p.add_kernel(k) + + if 'surface_13' in experiments: + k = p.create_kernel(f'Surface_13_seq_{n_rounds}rounds') + # Preparation & heralded_init + # for q in data_idxs+[Q_anc]: + for q in data_idxs: + k.prepz(q) + k.measure(q) + for q in _remaining_ancillas+[Q_anc]: + k.measure(q) + k.gate('wait', [], 400) + if initial_state_qubits: + if stabilizer_type == 'Z': + for q_name in initial_state_qubits: + k.gate('rx180', [data_qubit_map[q_name]]) + elif stabilizer_type == 'X': + if q_name in data_qubit_map.keys(): + for q_name in initial_state_qubits: + k.gate('ry90', [data_qubit_map[q_name]]) + else: + k.gate('rym90', [data_qubit_map[q_name]]) + else: + raise ValueError('Only Z and X type allowed.') + k.barrier([]) + # QEC Rounds + for i in range(n_rounds): + # First Pi/2 pulse + if stabilizer_type == 'X': + for q in data_idxs: + k.gate("rym90", [q]) + if 'Z' == target_stab[:1]: # If ancillas are Z + for q in Z_anci_idxs: + k.gate("ry90", [q]) + else: # If ancillas are X + for q in X_anci_idxs: + k.gate("ry90", [q]) + k.barrier([]) + # Flux dance + k.gate('wait', []) + if 'Z' == target_stab[:1]: + ## Z Flux dances + k.gate(f'flux_dance_5', [0]) + k.gate(f'flux_dance_6', [0]) + k.gate(f'flux_dance_7', [0]) + k.gate(f'flux_dance_8', [0]) 
+ else: + ## X Flux dances + k.gate(f'flux_dance_1', [0]) + k.gate(f'flux_dance_2', [0]) + k.gate(f'flux_dance_3', [0]) + k.gate(f'flux_dance_4', [0]) + k.gate('wait', [], 0) + k.barrier([]) + # Second Pi/2 pulse + if stabilizer_type == 'X': + for q in data_idxs: + k.gate("ry90", [q]) + if 'Z' == target_stab[:1]: # If ancillas are Z + for q in Z_anci_idxs: + k.gate("rym90", [q]) + else: # If ancillas are X + for q in X_anci_idxs: + k.gate("rym90", [q]) + k.barrier([]) + # Measurement of ancillas + k.measure(Q_anc) + for q in _remaining_ancillas: + k.measure(q) + if i == n_rounds-1: + for q in data_idxs: + if stabilizer_type == 'X': + k.gate('rym90', [q]) + k.measure(q) + else: + for q in data_idxs: + # Single measurement Echo + idle_time = (measurement_time_ns-20)//2 + nr_idles = idle_time//20 + for idle in range(nr_idles): + k.gate('i', [q]) + k.gate('rx180', [q]) + for idle in range(nr_idles): + k.gate('i', [q]) + + # for cycle in range(int(480/6/20)): + # k.gate('i', [q]) + # k.gate('rx180', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('rY180', [q]) + # k.gate('i', [q]) + k.gate("wait", [], 0) + p.add_kernel(k) + + if 'surface_13_LRU' in experiments: + k = p.create_kernel(f'Surface_13_LRU_seq_{n_rounds}rounds') + # Preparation & heralded_init + for q in data_idxs: + k.prepz(q) + k.measure(q) + for q in _remaining_ancillas+[Q_anc]: + k.measure(q) + k.gate('wait', [], 400) + if initial_state_qubits: + if stabilizer_type == 'Z': + for q_name in initial_state_qubits: + k.gate('rx180', [data_qubit_map[q_name]]) + elif stabilizer_type == 'X': + if q_name in data_qubit_map.keys(): + for q_name in initial_state_qubits: + k.gate('ry90', [data_qubit_map[q_name]]) + else: + k.gate('rym90', [data_qubit_map[q_name]]) + else: + raise ValueError('Only Z and X type allowed.') + k.barrier([]) + # QEC Rounds + for i in range(n_rounds): + # First Pi/2 pulse + if stabilizer_type == 'X': + for q in data_idxs: + k.gate("rym90", [q]) + if 'Z' == target_stab[:1]: # If 
ancillas are Z + for q in Z_anci_idxs: + k.gate("ry90", [q]) + else: # If ancillas are X + for q in X_anci_idxs: + k.gate("ry90", [q]) + k.barrier([]) + # Flux dance + # k.gate('wait', [], 20) + if 'Z' == target_stab[:1]: + ## Z Flux dances + k.gate(f'flux_dance_5', [0]) + k.gate(f'flux_dance_6', [0]) + k.gate(f'flux_dance_7', [0]) + k.gate(f'flux_dance_8', [0]) + else: + ## X Flux dances + k.gate(f'flux_dance_1', [0]) + k.gate(f'flux_dance_2', [0]) + k.gate(f'flux_dance_3', [0]) + k.gate(f'flux_dance_4', [0]) + k.gate('wait', [], 0) + k.barrier([]) + # Second Pi/2 pulse + if stabilizer_type == 'X': + for q in data_idxs: + k.gate("ry90", [q]) + if 'Z' == target_stab[:1]: # If ancillas are Z + for q in Z_anci_idxs: + k.gate("rym90", [q]) + else: # If ancillas are X + for q in X_anci_idxs: + k.gate("rym90", [q]) + k.barrier([]) + k.gate('wait', [], 0) + # Measurement of ancillas + k.measure(Q_anc) + for q in _remaining_ancillas: + k.measure(q) + if i == n_rounds-1: + for q in data_idxs: + if stabilizer_type == 'X': + k.gate('rym90', [q]) + k.measure(q) + else: + k.barrier([Q_anc, 20, 21, 22, 23, 24, 25]+_remaining_ancillas) + for q in lru_idxs: + k.gate('lru', [q]) + # LRU ancillas + # evaluate LRU idle time + idle_LRU = measurement_time_ns - 20 + Z_idle_LRU = measurement_time_ns//3-20 + k.gate('wait', [17, 18, 19], idle_LRU+Z_idle_LRU+20) + k.gate('wait', [21, 22, 23, 24], Z_idle_LRU-20) + for q in data_idxs: + # Single measurement Echo + idle_time = (measurement_time_ns-20)//2 + nr_idles = idle_time//20 + nr_idles_Z = (measurement_time_ns//3-20)//40 + for idle in range(nr_idles+nr_idles_Z): + k.gate('i', [q]) + k.gate('rx180', [q]) + for idle in range(nr_idles+nr_idles_Z): + k.gate('i', [q]) + # for cycle in range(int(480/6/20)): + # k.gate('i', [q]) + # k.gate('rx180', [q]) + # k.gate('i', [q]) + # k.gate('i', [q]) + # k.gate('rY180', [q]) + # k.gate('i', [q]) + k.gate("wait", [], ) + p.add_kernel(k) + + if 'surface_17' in experiments: + k = 
p.create_kernel(f'Surface_17_{n_rounds}rounds') + # Preparation & heralded_init + for q in data_idxs: + k.prepz(q) + k.measure(q) + for q in _remaining_ancillas+[Q_anc]: + k.measure(q) + k.gate('wait', [], 300) + if initial_state_qubits: + for q_name in initial_state_qubits: + k.gate('rx180', [data_qubit_map[q_name]]) + k.barrier([]) + # QEC Rounds + for i in range(n_rounds): + # First Pi/2 pulse + for q in Z_anci_idxs: + k.gate("ry90", [q]) + k.barrier([]) + # Flux dance + k.gate('wait', [], 0) + ## Z Flux dances + k.gate(f'flux_dance_5', [0]) + k.gate(f'flux_dance_6', [0]) + k.gate(f'flux_dance_7', [0]) + k.gate(f'flux_dance_8', [0]) + k.gate('wait', [], 0) + k.barrier([]) + # Second Pi/2 pulse + for q in Z_anci_idxs: + k.gate("rym90", [q]) + k.barrier([]) + + for qa in X_anci_idxs: + k.gate("ry90", [qa]) + for qa in data_idxs: + k.gate("rym90", [qa]) + k.barrier([]) + + #X Flux dances + k.gate('wait', [], 0) + k.gate(f'flux_dance_1', [0]) + k.gate(f'flux_dance_2', [0]) + k.gate(f'flux_dance_3', [0]) + k.gate(f'flux_dance_4', [0]) + k.gate('wait', [], 0) + k.barrier([]) + + for qa in X_anci_idxs: + k.gate("rym90", [qa]) + for qa in data_idxs: + k.gate("ry90", [qa]) + k.barrier([]) + k.measure(Q_anc) + for q in _remaining_ancillas: + k.measure(q) + if i == n_rounds-1: + for q in data_idxs: + k.measure(q) + else: + for q in data_idxs: + # Measurement Echo + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('rx180', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate('i', [q]) + k.gate("wait", [], 0) + p.add_kernel(k) + + if 'repetition_code' in experiments: + k = p.create_kernel(f'Repetition_code_seq_{n_rounds}rounds') + # Preparation & heralded_init + for q in data_idxs: + k.prepz(q) + 
k.measure(q) + for q in _remaining_ancillas+[Q_anc]: + k.prepz(q) + k.measure(q) + k.gate('wait', [], 400) # to avoid UHF trigger holdoff! + # TODO: Add arbitrary state initialization + if initial_state_qubits: + if stabilizer_type == 'Z': + for q in Q_D: + k.gate("ry90", [q]) + if stabilizer_type == 'X': + for q in Q_D: + k.gate("i", [q]) + k.barrier([]) + # QEC Rounds + for i in range(n_rounds): + # First Pi/2 pulse + if stabilizer_type == 'X': + for q in Q_D: + k.gate("ry90", [q]) + # for q in Z_anci_idxs+X_anci_idxs: + for q in [Q_anc]: + if q in [ancilla_qubit_map[qa] for qa in \ + ['X3', 'Z1', 'X1', 'Z2']]: + k.gate("ry90", [q]) + # Flux dance + k.gate('wait', []) + k.gate(f'repetition_code_1', [0]) + k.gate(f'repetition_code_2', [0]) + k.gate('wait', []) + # Second Pi/2 pulse + # for q in Z_anci_idxs+X_anci_idxs: + for q in [Q_anc]: + if q in [ancilla_qubit_map[qa] for qa in \ + ['X3', 'Z1', 'X1', 'Z2']]: + k.gate("rym90", [q]) + # First Pi/2 pulse + elif q in [ancilla_qubit_map[qa] for qa in \ + ['Z3', 'X4', 'Z4', 'X2']]: + k.gate("ry90", [q]) + # Flux dance + k.gate('wait', []) + k.gate(f'repetition_code_3', [0]) + k.gate(f'repetition_code_4', [0]) + k.gate('wait', []) + # Second Pi/2 pulse + if stabilizer_type == 'X': + for q in Q_D: + k.gate("rym90", [q]) + # for q in Z_anci_idxs+X_anci_idxs: + for q in [Q_anc]: + if q in [ancilla_qubit_map[qa] for qa in \ + ['Z3', 'X4', 'Z4', 'X2']]: + k.gate("rym90", [q]) + k.gate('wait', []) + # Measurement of ancillas + for q in [Q_anc]: + k.measure(q) + # Measure remaining ancillas + for q in _remaining_ancillas: + k.measure(q) + # Measure data qubits only at the last round + if i == n_rounds-1: + for q in data_idxs: + if stabilizer_type == 'X': + k.gate('rym90', [q]) + k.measure(q) + else: + for q in Q_D: + # Single measurement Echo + idle_time = (measurement_time_ns-20)//2 + nr_idles = idle_time//20 + for idle in range(nr_idles): + k.gate('i', [q]) + k.gate('rx180', [q]) + for idle in range(nr_idles): + 
k.gate('i', [q]) + k.gate("wait", [], 0) + p.add_kernel(k) + + ###################### + # Calibration points # + ###################### + # Calibration 000 + k = p.create_kernel('Cal_zeros') + for q in data_idxs: + k.prepz(q) + k.measure(q) + for q in _remaining_ancillas+[Q_anc]: + k.measure(q) + k.gate('wait', [], 400) + for q in data_idxs: + k.measure(q) + for q in _remaining_ancillas+[Q_anc]: + k.measure(q) + p.add_kernel(k) + # Calibration 111 + k = p.create_kernel('Cal_ones') + for q in data_idxs: + k.prepz(q) + k.measure(q) + for q in _remaining_ancillas+[Q_anc]: + k.measure(q) + k.gate('wait', [], 400) + for q in data_idxs+_remaining_ancillas+[Q_anc]: + k.gate('rx180', [q]) + k.measure(q) + p.add_kernel(k) + # Calibration 222 + k = p.create_kernel('Cal_twos') + for q in data_idxs: + k.prepz(q) + k.measure(q) + for q in _remaining_ancillas+[Q_anc]: + k.measure(q) + k.gate('wait', [], 400) + for q in data_idxs+_remaining_ancillas+[Q_anc]: + k.gate('rx180', [q]) + k.gate('rx12', [q]) + k.measure(q) + p.add_kernel(k) + + p.compile() + return p + + +def repetition_code_sequence( + involved_ancilla_indices: List[int], + involved_data_indices: List[int], + all_ancilla_indices: List[int], + all_data_indices: List[int], + platf_cfg: str, + array_of_round_number: List[int], + stabilizer_type: str = 'X', + measurement_time_ns: int = 500, + initial_state: List[int] = None, + ): + """OpenQL program constructor for repetition code experiement (any distance). + + Args: + involved_ancilla_indices (List[int]): Ancilla-qubit indices part of the repetition code. Used by the central controller. Also known as 'cfg_qubit_nr'. + involved_data_indices (List[int]): Data-qubit indices part of the repetition code. Used by the central controller. Also known as 'cfg_qubit_nr'. + all_ancilla_indices (List[int]): All (ancilla) qubit indices on the device. Used for preparation and calibration points. + all_data_indices (List[int]): All (ancilla) qubit indices on the device. 
Used for preparation and calibration points. + platf_cfg (str): Config-file path, required for OqlProgram construction. + array_of_round_number (list): Array-like of parity-check repetition number. Example [1, 5, 10] will schedule a stabilizer reptition of x1, x5 and x10 respectively. + stabilizer_type (str, optional): _description_. Defaults to 'X'. + measurement_time_ns (int, optional): _description_. Defaults to 500. + + Returns: + OqlProgram: _description_ + """ + p = OqlProgram("Repeated_stabilizer_seq", platf_cfg) + assert stabilizer_type in ['X', 'Z'], '"stabilizer_type" must be "X" or "Z"' + # Data qubit idx dictionary + qubit_id_to_name_lookup: Dict[int, str] = { + 0: "D3", + 1: "D7", + 2: "D2", + 3: "X3", + 4: "D9", + 5: "D8", + 6: "D1", + 7: "Z1", + 8: "X2", + 9: "X4", + 10: "Z4", + 11: "X1", + 12: "Z3", + 13: "D5", + 14: "Z2", + 15: "D4", + 16: "D6", + } + + # remove X4 + qubit_nr_x4: int = 9 + if qubit_nr_x4 in all_ancilla_indices: + all_ancilla_indices.remove(qubit_nr_x4) + + all_qubit_indices = all_ancilla_indices + all_data_indices + + for n_rounds in array_of_round_number: + + k = p.create_kernel(f'Repetition_code_seq_{n_rounds}rounds') + # Preparation & heralded_init + for q in all_qubit_indices: + k.prepz(q) + k.measure(q) + k.gate('wait', [], 400) # to avoid UHF trigger holdoff! 
+ # Arbitrary state initialization + if initial_state is None: + if stabilizer_type == 'Z': + for i, q in enumerate(involved_data_indices): + if i % 2 == 0: + k.gate("i", [q]) + else: + k.gate("rx180", [q]) + # k.gate("i", [q]) + if stabilizer_type == 'X': + for i, q in enumerate(involved_data_indices): + if i % 2 == 0: + k.gate("ry90", [q]) + else: + k.gate("rym90", [q]) + # k.gate("i", [q]) + else: + if stabilizer_type == 'Z': + for i, q in enumerate(involved_data_indices): + if initial_state[i] == 0: + k.gate("i", [q]) + else: + k.gate("rx180", [q]) + # k.gate("i", [q]) + if stabilizer_type == 'X': + for i, q in enumerate(involved_data_indices): + if initial_state[i] == 0: + k.gate("ry90", [q]) + else: + k.gate("rym90", [q]) + # k.gate("i", [q]) + k.barrier([]) + # QEC Rounds + for i in range(n_rounds): + # First Pi/2 pulse + if stabilizer_type == 'X': + for q in involved_data_indices: + k.gate("ry90", [q]) + # for q in Z_anci_idxs+X_anci_idxs: + for q in involved_ancilla_indices: + if qubit_id_to_name_lookup[q] in ['X3', 'Z2']: + k.gate("ry90", [q]) + # Flux dance + k.gate('wait', []) + k.gate(f'repetition_code_1', [0]) + k.gate(f'repetition_code_2', [0]) + k.gate('wait', []) + # Second Pi/2 pulse + # for q in Z_anci_idxs+X_anci_idxs: + for q in involved_ancilla_indices: + if qubit_id_to_name_lookup[q] in ['X3', 'Z2']: + k.gate("rym90", [q]) + # First Pi/2 pulse + elif qubit_id_to_name_lookup[q] in ['Z1', 'X2']: + k.gate("ry90", [q]) + # Flux dance + k.gate('wait', []) + k.gate(f'repetition_code_3', [0]) + k.gate(f'repetition_code_4', [0]) + k.gate('wait', []) + # Second Pi/2 pulse + # for q in Z_anci_idxs+X_anci_idxs: + for q in involved_ancilla_indices: + if qubit_id_to_name_lookup[q] in ['Z1', 'X2']: + k.gate("rym90", [q]) + # First Pi/2 pulse + elif qubit_id_to_name_lookup[q] in ['Z3', 'Z4']: + k.gate("ry90", [q]) + # Flux dance + k.gate('wait', []) + k.gate(f'repetition_code_5', [0]) + k.gate(f'repetition_code_6', [0]) + k.gate('wait', []) + # Second Pi/2 
pulse + if stabilizer_type == 'X': + for q in involved_data_indices: + k.gate("rym90", [q]) + # for q in Z_anci_idxs+X_anci_idxs: + for q in involved_ancilla_indices: + if qubit_id_to_name_lookup[q] in ['Z3', 'Z4']: + k.gate("rym90", [q]) + k.gate('wait', []) + + # During any other round, measure only ancilla's and decouple data qubits. + at_last_round: bool = i == n_rounds-1 + if not at_last_round: + # Measure (only) all ancilla's, dynamical decoupling on data qubits + for q in all_ancilla_indices: + k.measure(q) + for q in involved_data_indices: + # Single measurement Echo + idle_time = (measurement_time_ns-20)//2 + nr_idles = idle_time//20 + for idle in range(nr_idles): + k.gate('i', [q]) + k.gate('rx180', [q]) + for idle in range(nr_idles): + k.gate('i', [q]) + k.gate("wait", [], 0) + + # Make sure all qubits are measured in the last round + # Before last round apply correction gate to data qubits, depending on the stabilizer type. + # Final measurement and data qubit correction + if stabilizer_type == 'X': + for q in involved_data_indices: + k.gate('rym90', [q]) + for q in all_qubit_indices: + k.measure(q) + k.gate("wait", [], 0) + p.add_kernel(k) + + ###################### + # Calibration points # + ###################### + # Calibration 000 + k = p.create_kernel('Cal_zeros') + for q in all_qubit_indices: + k.prepz(q) + k.measure(q) + k.gate('wait', [], 400) + for q in all_qubit_indices: + k.measure(q) + p.add_kernel(k) + # Calibration 111 + k = p.create_kernel('Cal_ones') + for q in all_qubit_indices: + k.prepz(q) + k.measure(q) + k.gate('wait', [], 400) + for q in all_qubit_indices: + k.gate('rx180', [q]) + k.measure(q) + p.add_kernel(k) + # Calibration 222 + k = p.create_kernel('Cal_twos') + for q in all_qubit_indices: + k.prepz(q) + k.measure(q) + k.gate('wait', [], 400) + for q in all_qubit_indices: + k.gate('rx180', [q]) + k.gate('rx12', [q]) + k.measure(q) + p.add_kernel(k) + + p.compile() + return p + + +def repetition_code_sequence_old( + 
involved_ancilla_indices: List[int], + involved_data_indices: List[int], + all_ancilla_indices: List[int], + all_data_indices: List[int], + platf_cfg: str, + array_of_round_number: List[int], + stabilizer_type: str = 'X', + measurement_time_ns: int = 500, + initial_state: List[int] = None, + ): + """OpenQL program constructor for repetition code experiement (any distance). + + Args: + involved_ancilla_indices (List[int]): Ancilla-qubit indices part of the repetition code. Used by the central controller. Also known as 'cfg_qubit_nr'. + involved_data_indices (List[int]): Data-qubit indices part of the repetition code. Used by the central controller. Also known as 'cfg_qubit_nr'. + all_ancilla_indices (List[int]): All (ancilla) qubit indices on the device. Used for preparation and calibration points. + all_data_indices (List[int]): All (ancilla) qubit indices on the device. Used for preparation and calibration points. + platf_cfg (str): Config-file path, required for OqlProgram construction. + array_of_round_number (list): Array-like of parity-check repetition number. Example [1, 5, 10] will schedule a stabilizer reptition of x1, x5 and x10 respectively. + stabilizer_type (str, optional): _description_. Defaults to 'X'. + measurement_time_ns (int, optional): _description_. Defaults to 500. 
+ + Returns: + OqlProgram: _description_ + """ + p = OqlProgram("Repeated_stabilizer_seq", platf_cfg) + assert stabilizer_type in ['X', 'Z'], '"stabilizer_type" must be "X" or "Z"' + # Data qubit idx dictionary + qubit_id_to_name_lookup: Dict[int, str] = { + 0: "D3", + 1: "D7", + 2: "D2", + 3: "X3", + 4: "D9", + 5: "D8", + 6: "D1", + 7: "Z1", + 8: "X2", + 9: "X4", + 10: "Z4", + 11: "X1", + 12: "Z3", + 13: "D5", + 14: "Z2", + 15: "D4", + 16: "D6", + } + + # remove X4 + qubit_nr_x4: int = 9 + if qubit_nr_x4 in all_ancilla_indices: + all_ancilla_indices.remove(qubit_nr_x4) + + all_qubit_indices = all_ancilla_indices + all_data_indices + + for n_rounds in array_of_round_number: + + k = p.create_kernel(f'Repetition_code_seq_{n_rounds}rounds') + # Preparation & heralded_init + for q in all_qubit_indices: + k.prepz(q) + k.measure(q) + k.gate('wait', [], 400) # to avoid UHF trigger holdoff! + # Arbitrary state initialization + if initial_state is None: + if stabilizer_type == 'Z': + for i, q in enumerate(involved_data_indices): + if i % 2 == 0: + k.gate("i", [q]) + else: + k.gate("rx180", [q]) + # k.gate("i", [q]) + if stabilizer_type == 'X': + for i, q in enumerate(involved_data_indices): + if i % 2 == 0: + k.gate("ry90", [q]) + else: + k.gate("rym90", [q]) + # k.gate("i", [q]) + else: + if stabilizer_type == 'Z': + for i, q in enumerate(involved_data_indices): + if initial_state[i] == 0: + k.gate("i", [q]) + else: + k.gate("rx180", [q]) + # k.gate("i", [q]) + if stabilizer_type == 'X': + for i, q in enumerate(involved_data_indices): + if initial_state[i] == 0: + k.gate("ry90", [q]) + else: + k.gate("rym90", [q]) + # k.gate("i", [q]) + k.barrier([]) + # QEC Rounds + for i in range(n_rounds): + # First Pi/2 pulse + if stabilizer_type == 'X': + for q in involved_data_indices: + k.gate("ry90", [q]) + # for q in Z_anci_idxs+X_anci_idxs: + for q in involved_ancilla_indices: + if qubit_id_to_name_lookup[q] in ['X3', 'Z1', 'X1', 'Z2']: + k.gate("ry90", [q]) + # Flux dance + 
k.gate('wait', []) + k.gate(f'repetition_code_1', [0]) + k.gate(f'repetition_code_2', [0]) + k.gate('wait', []) + # Second Pi/2 pulse + # for q in Z_anci_idxs+X_anci_idxs: + for q in involved_ancilla_indices: + if qubit_id_to_name_lookup[q] in ['X3', 'Z1', 'X1', 'Z2']: + k.gate("rym90", [q]) + # First Pi/2 pulse + elif qubit_id_to_name_lookup[q] in ['Z3', 'X4', 'Z4', 'X2']: + k.gate("ry90", [q]) + # Flux dance + k.gate('wait', []) + k.gate(f'repetition_code_3', [0]) + k.gate(f'repetition_code_4', [0]) + k.gate('wait', []) + # Second Pi/2 pulse + if stabilizer_type == 'X': + for q in involved_data_indices: + k.gate("rym90", [q]) + # for q in Z_anci_idxs+X_anci_idxs: + for q in involved_ancilla_indices: + if qubit_id_to_name_lookup[q] in ['Z3', 'X4', 'Z4', 'X2']: + k.gate("rym90", [q]) + k.gate('wait', []) + + # During any other round, measure only ancilla's and decouple data qubits. + at_last_round: bool = i == n_rounds-1 + if not at_last_round: + # Measure (only) all ancilla's, dynamical decoupling on data qubits + for q in all_ancilla_indices: + k.measure(q) + for q in involved_data_indices: + # Single measurement Echo + idle_time = (measurement_time_ns-20)//2 + nr_idles = idle_time//20 + for idle in range(nr_idles): + k.gate('i', [q]) + k.gate('rx180', [q]) + for idle in range(nr_idles): + k.gate('i', [q]) + k.gate("wait", [], 0) + + # Make sure all qubits are measured in the last round + # Before last round apply correction gate to data qubits, depending on the stabilizer type. 
+ # Final measurement and data qubit correction + if stabilizer_type == 'X': + for q in involved_data_indices: + k.gate('rym90', [q]) + for q in all_qubit_indices: + k.measure(q) + k.gate("wait", [], 0) + p.add_kernel(k) + + ###################### + # Calibration points # + ###################### + # Calibration 000 + k = p.create_kernel('Cal_zeros') + for q in all_qubit_indices: + k.prepz(q) + k.measure(q) + k.gate('wait', [], 400) + for q in all_qubit_indices: + k.measure(q) + p.add_kernel(k) + # Calibration 111 + k = p.create_kernel('Cal_ones') + for q in all_qubit_indices: + k.prepz(q) + k.measure(q) + k.gate('wait', [], 400) + for q in all_qubit_indices: + k.gate('rx180', [q]) + k.measure(q) + p.add_kernel(k) + # Calibration 222 + k = p.create_kernel('Cal_twos') + for q in all_qubit_indices: + k.prepz(q) + k.measure(q) + k.gate('wait', [], 400) + for q in all_qubit_indices: + k.gate('rx180', [q]) + k.gate('rx12', [q]) + k.measure(q) + p.add_kernel(k) + + p.compile() + return p diff --git a/pycqed/measurement/openql_experiments/openql_helpers.py b/pycqed/measurement/openql_experiments/openql_helpers.py index f7f11acc67..063dc1478e 100644 --- a/pycqed/measurement/openql_experiments/openql_helpers.py +++ b/pycqed/measurement/openql_experiments/openql_helpers.py @@ -278,11 +278,11 @@ def check_recompilation_needed_hash_based( } if recompile is False: - if _recompile is True: - log.warning( - "`{}` or\n`{}`\n might have been modified! Are you sure you didn't" - " want to compile?".format(self._platf_cfg, clifford_rb_oql) - ) + # if _recompile is True: + # log.warning( + # "`{}` or\n`{}`\n might have been modified! 
Are you sure you didn't" + # " want to compile?".format(self._platf_cfg, clifford_rb_oql) + # ) res_dict["recompile"] = False elif recompile is True: # Enforce recompilation @@ -464,9 +464,7 @@ def add_multi_q_cal_points( qubits: List[int], combinations: List[str] = ["00", "01", "10", "11"], reps_per_cal_pnt: int = 1, - f_state_cal_pt_cw: int = 9, # 9 is the one listed as rX12 in `mw_lutman` - nr_flux_dance: int = None, - flux_cw_list: List[str] = None + heralded_init: bool = False, ) -> None: """ @@ -497,37 +495,22 @@ def add_multi_q_cal_points( state_to_gates = { "0": ["i"], "1": ["rx180"], - "2": ["rx180", "cw_{:02}".format(f_state_cal_pt_cw)], + "2": ["rx180", "rX12"], + "3": ["rx180", "rX12", "rX23"], } for i, comb in enumerate(comb_repeated): k = self.create_kernel('cal{}_{}'.format(i, comb)) - # NOTE: for debugging purposes of the effect of fluxing on readout, - # prepend flux dance before calibration points for q_state, q in zip(comb, qubits): k.prepz(q) + if heralded_init: + k.measure(q) k.gate("wait", [], 0) # alignment - - if nr_flux_dance and flux_cw_list: - for i in range(int(nr_flux_dance)): - for flux_cw in flux_cw_list: - k.gate(flux_cw, [0]) - k.gate("wait", [], 0) - # k.gate("wait", [], 20) # prevent overlap of flux with mw gates - for q_state, q in zip(comb, qubits): for gate in state_to_gates[q_state]: k.gate(gate, [q]) k.gate("wait", [], 0) # alignment - # k.gate("wait", [], 20) # alignment - - # for q_state, q in zip(comb, qubits): - # k.prepz(q) - # for gate in state_to_gates[q_state]: - # k.gate(gate, [q]) - # k.gate("wait", [], 0) # alignment - for q in qubits: k.measure(q) k.gate('wait', [], 0) # alignment diff --git a/pycqed/measurement/openql_experiments/single_qubit_oql.py b/pycqed/measurement/openql_experiments/single_qubit_oql.py index b606116fce..c7ca23cb7b 100644 --- a/pycqed/measurement/openql_experiments/single_qubit_oql.py +++ b/pycqed/measurement/openql_experiments/single_qubit_oql.py @@ -1,8 +1,8 @@ import numpy as np from 
deprecated import deprecated -from typing import List, Union - +from typing import List, Union, Optional from pycqed.measurement.randomized_benchmarking import randomized_benchmarking as rb + from pycqed.measurement.openql_experiments.openql_helpers import OqlProgram @@ -71,7 +71,7 @@ def pulsed_spec_seq( qubit_idx: int, spec_pulse_length: float, platf_cfg: str -) -> OqlProgram: + ) -> OqlProgram: """ Sequence for pulsed spectroscopy. @@ -105,7 +105,7 @@ def pulsed_spec_seq_marked( trigger_idx_2: int = None, wait_time_ns: int = 0, cc: str = 'CCL' -) -> OqlProgram: + ) -> OqlProgram: """ Sequence for pulsed spectroscopy, similar to old version. Difference is that this one triggers the 0th trigger port of the CCLight and uses the zeroth @@ -153,7 +153,7 @@ def pulsed_spec_seq_v2( spec_pulse_length: float, platf_cfg: str, trigger_idx: int -) -> OqlProgram: + ) -> OqlProgram: """ Sequence for pulsed spectroscopy, similar to old version. Difference is that this one triggers the 0th trigger port of the CCLight and usus the zeroth @@ -380,9 +380,10 @@ def flipping( equator: bool = False, cal_points: bool = True, flip_ef: bool = False, + flip_fh: bool = False, ax: str = 'x', angle: str = '180' -) -> OqlProgram: + ) -> OqlProgram: """ Generates a flipping sequence that performs multiple pi-pulses Basic sequence: @@ -417,12 +418,16 @@ def flipping( elif cal_points and (i == (len(number_of_flips)-2) or i == (len(number_of_flips)-1)): if ax == 'y': - k.y(qubit_idx) - elif flip_ef: - k.gate('rX12',[qubit_idx]) + k.gate('ry180', [qubit_idx]) else: - k.x(qubit_idx) - + k.gate('rx180', [qubit_idx]) + # Should probably have an rx12 here + # when doing ef flipping (Jorge) + if flip_ef: + k.gate('rX12',[qubit_idx]) + elif flip_fh: + k.gate('rX12',[qubit_idx]) + k.gate('rX23',[qubit_idx]) k.measure(qubit_idx) else: if equator: @@ -431,6 +436,10 @@ def flipping( elif flip_ef: k.gate('rx180', [qubit_idx]) k.gate('cw_15', [qubit_idx]) + elif flip_fh: + k.gate('rx180', [qubit_idx]) + 
k.gate('rx12', [qubit_idx]) + k.gate('cw_16', [qubit_idx]) else: k.gate('rx90', [qubit_idx]) for j in range(n): @@ -444,11 +453,16 @@ def flipping( k.gate('rx90', [qubit_idx]) elif flip_ef: k.gate('rX12',[qubit_idx]) + elif flip_fh: + k.gate('rX23',[qubit_idx]) else: k.x(qubit_idx) if flip_ef: k.gate('rx180',[qubit_idx]) + elif flip_fh: + k.gate('rx12',[qubit_idx]) + k.gate('rx180',[qubit_idx]) k.measure(qubit_idx) p.add_kernel(k) @@ -515,6 +529,7 @@ def AllXY( p.compile() return p + def depletion_AllXY(qubit_idx: int, platf_cfg: str): """ Plays an ALLXY sequence in two settings without and with @@ -537,7 +552,7 @@ def depletion_AllXY(qubit_idx: int, platf_cfg: str): k.prepz(qubit_idx) k.gate(xy[0], [qubit_idx]) k.gate(xy[1], [qubit_idx]) - # k.gate('wait', [qubit_idx], 500) + k.gate('wait', [qubit_idx], 1000) # Time for UHFQC to re-trigger k.measure(qubit_idx) p.add_kernel(k) @@ -546,14 +561,107 @@ def depletion_AllXY(qubit_idx: int, platf_cfg: str): k.measure(qubit_idx) k.gate(xy[0], [qubit_idx]) k.gate(xy[1], [qubit_idx]) - # k.gate('wait', [qubit_idx], 500) + k.gate('wait', [qubit_idx], 1000) # Time for UHFQC to re-trigger + k.measure(qubit_idx) + p.add_kernel(k) + + k = p.create_kernel("AllXY_{}_{}_1".format(i, j)) + k.prepz(qubit_idx) + k.gate('rx180', [qubit_idx]) + k.gate(xy[0], [qubit_idx]) + k.gate(xy[1], [qubit_idx]) + k.gate('wait', [qubit_idx], 1000) # Time for UHFQC to re-trigger + k.measure(qubit_idx) + p.add_kernel(k) + + k = p.create_kernel("AllXY_meas_{}_{}_1".format(i, j)) + k.prepz(qubit_idx) + k.gate('rx180', [qubit_idx]) + k.measure(qubit_idx) + k.gate(xy[0], [qubit_idx]) + k.gate(xy[1], [qubit_idx]) + k.gate('wait', [qubit_idx], 1000) # Time for UHFQC to re-trigger k.measure(qubit_idx) p.add_kernel(k) p.compile() return p + def T1(qubit_idx: int, + platf_cfg: str, + times: List[float]): + """ + Single qubit T1 sequence. + Writes output files to the directory specified in openql. 
+ Output directory is set as an attribute to the program for convenience. + + Input pars: + times: the list of waiting times for each T1 element + qubit_idx: int specifying the target qubit (starting at 0) + platf_cfg: filename of the platform config file + Returns: + p: OpenQL Program object + + + """ + p = OqlProgram('T1', platf_cfg) + + for i, time in enumerate(times[:-4]): + k = p.create_kernel('T1_{}'.format(i)) + k.prepz(qubit_idx) + k.gate('rx180', [qubit_idx]) + wait_nanoseconds = int(round(time/1e-9)) + k.gate("wait", [qubit_idx], wait_nanoseconds) + k.measure(qubit_idx) + p.add_kernel(k) + + # adding the calibration points + p.add_single_qubit_cal_points(qubit_idx=qubit_idx) + + p.compile() + return p + + +def T1_vs_flux(qubit_idx: int, + platf_cfg: str, + times: List[float]): + """ + Single qubit T1 sequence vs flux. + Writes output files to the directory specified in openql. + Output directory is set as an attribute to the program for convenience. + + Input pars: + times: the list of waiting times for each T1 element + qubit_idx: int specifying the target qubit (starting at 0) + platf_cfg: filename of the platform config file + Returns: + p: OpenQL Program object + + + """ + p = OqlProgram('T1_vs_flux', platf_cfg) + + for i, time in enumerate(times[:-4]): + k = p.create_kernel('T1_{}'.format(i)) + k.prepz(qubit_idx) + k.gate('rx180', [qubit_idx]) + wait_nanoseconds = int(round(time/1e-9)) + for i in range(int(wait_nanoseconds/60)): + k.gate("sf_park", [qubit_idx]) + k.gate("i", [qubit_idx]) + k.gate("wait", [qubit_idx], 0) + k.measure(qubit_idx) + p.add_kernel(k) + + # adding the calibration points + p.add_single_qubit_cal_points(qubit_idx=qubit_idx) + + p.compile() + return p + + +def T1_under_flux_dance(qubit_idx: int, platf_cfg: str, times: List[float], nr_cz_instead_of_idle_time: List[int]=None, @@ -576,7 +684,7 @@ def T1(qubit_idx: int, """ - p = OqlProgram('T1', platf_cfg) + p = OqlProgram('T1_under_flux_dance', platf_cfg) for i, time in 
enumerate(times[:-4]): k = p.create_kernel('T1_{}'.format(i)) @@ -738,6 +846,83 @@ def Ramsey( qubit_idx: int, platf_cfg: str, times: List[float], + ): + """ + Single qubit Ramsey sequence. + Writes output files to the directory specified in openql. + Output directory is set as an attribute to the program for convenience. + + Input pars: + times: the list of waiting times for each Ramsey element + qubit_idx: int specifying the target qubit (starting at 0) + platf_cfg: filename of the platform config file + Returns: + p: OpenQL Program object + + """ + p = OqlProgram("Ramsey", platf_cfg) + + for i, time in enumerate(times[:-4]): + k = p.create_kernel("Ramsey_{}".format(i)) + k.prepz(qubit_idx) + k.gate('rx90', [qubit_idx]) + wait_nanoseconds = int(round(time/1e-9)) + k.gate("wait", [qubit_idx], wait_nanoseconds) + k.gate('rx90', [qubit_idx]) + k.measure(qubit_idx) + p.add_kernel(k) + + # adding the calibration points + p.add_single_qubit_cal_points(qubit_idx=qubit_idx) + + p.compile() + return p + + +def Ramsey_vs_flux( + qubit_idx: int, + platf_cfg: str, + times: List[float], + ): + """ + Single qubit Ramsey sequence. + Writes output files to the directory specified in openql. + Output directory is set as an attribute to the program for convenience. 
+ + Input pars: + times: the list of waiting times for each Ramsey element + qubit_idx: int specifying the target qubit (starting at 0) + platf_cfg: filename of the platform config file + Returns: + p: OpenQL Program object + + """ + p = OqlProgram("Ramsey_vs_flux", platf_cfg) + + for i, time in enumerate(times[:-4]): + k = p.create_kernel("Ramsey_{}".format(i)) + k.prepz(qubit_idx) + k.gate('rx90', [qubit_idx]) + wait_nanoseconds = int(round(time/1e-9)) + for i in range(int(wait_nanoseconds/60)): + k.gate("sf_park", [qubit_idx]) + k.gate("i", [qubit_idx]) + k.gate("wait", [qubit_idx], 0) + k.gate('rx90', [qubit_idx]) + k.measure(qubit_idx) + p.add_kernel(k) + + # adding the calibration points + p.add_single_qubit_cal_points(qubit_idx=qubit_idx) + + p.compile() + return p + + +def Ramsey_under_flux_dance( + qubit_idx: int, + platf_cfg: str, + times: List[float], nr_cz_instead_of_idle_time: List[int]=None, qb_cz_idx: str=None, cw_cz_instead_of_idle_time: str='cz' @@ -755,7 +940,7 @@ def Ramsey( p: OpenQL Program object """ - p = OqlProgram("Ramsey", platf_cfg) + p = OqlProgram("Ramsey_under_flux_dance", platf_cfg) for i, time in enumerate(times[:-4]): k = p.create_kernel("Ramsey_{}".format(i)) @@ -885,7 +1070,7 @@ def complex_Ramsey(times, qubit_idx: int, platf_cfg: str) -> OqlProgram: return p -def echo(times, qubit_idx: int, platf_cfg: str, delta_phase: int = 40) -> OqlProgram: +def echo(times, qubit_idx: int, platf_cfg: str) -> OqlProgram: """ Single qubit Echo sequence. Writes output files to the directory specified in openql. 
@@ -895,7 +1080,6 @@ def echo(times, qubit_idx: int, platf_cfg: str, delta_phase: int = 40) -> OqlPro times: the list of waiting times for each Echo element qubit_idx: int specifying the target qubit (starting at 0) platf_cfg: filename of the platform config file - delta_phase: acrued phase due to artificial detuning Returns: p: OpenQL Program object @@ -904,16 +1088,17 @@ def echo(times, qubit_idx: int, platf_cfg: str, delta_phase: int = 40) -> OqlPro for i, time in enumerate(times[:-4]): - angle = (i*delta_phase) % 360 - cw_idx = angle//20 + 9 - wait_nanoseconds = int(round(time*1e9 / 2)) - k = p.create_kernel("echo_{}".format(i)) k.prepz(qubit_idx) + # nr_clocks = int(time/20e-9/2) + wait_nanoseconds = int(round(time/1e-9/2)) k.gate('rx90', [qubit_idx]) k.gate("wait", [qubit_idx], wait_nanoseconds) k.gate('rx180', [qubit_idx]) k.gate("wait", [qubit_idx], wait_nanoseconds) + # k.gate('rx90', [qubit_idx]) + angle = (i*40) % 360 + cw_idx = angle//20 + 9 if angle == 0: k.gate('rx90', [qubit_idx]) else: @@ -1101,7 +1286,7 @@ def spin_lock_simple( platf_cfg: str, mw_gate_duration: float = 40e-9, tomo: bool = False -) -> OqlProgram: + ) -> OqlProgram: """ Single qubit Echo sequence. Writes output files to the directory specified in openql. @@ -1158,7 +1343,7 @@ def rabi_frequency( platf_cfg: str, mw_gate_duration: float = 40e-9, tomo: bool = False -) -> OqlProgram: + ) -> OqlProgram: """ Rabi Sequence consisting out of sequence of square pulses Writes output files to the directory specified in openql. 
@@ -1175,12 +1360,12 @@ def rabi_frequency( p = OqlProgram("rabi_frequency", platf_cfg) if tomo: - tomo_gates = ['I','rX180'] + tomo_gates = ['I','rX180','rX12'] else: tomo_gates = ['I'] if tomo: - timeloop = times[:-4][::2] + timeloop = times[:-6][::3] else: timeloop = times[:-4] @@ -1193,17 +1378,13 @@ def rabi_frequency( leftover_us = (time-square_us_cycles*1e-6) square_ns_cycles = np.floor((leftover_us+1e-10)/mw_gate_duration).astype(int) leftover_ns = (leftover_us-square_ns_cycles*mw_gate_duration) - # print(leftover_us) - # print(leftover_ns) - # mwlutman_index = np.round((leftover_ns+1e-10)/4e-9).astype(int) - # print(mwlutman_index) print("square_us_cycles", square_us_cycles) print("square_ns_cycles", square_ns_cycles) for suc in range(square_us_cycles): k.gate('cw_10', [qubit_idx]) # make sure that the square pulse lasts 1us for snc in range(square_ns_cycles): k.gate('cw_11', [qubit_idx]) # make sure that the square pulse lasts mw_gate_duration ns - # k.gate('cw_{}'.format(mwlutman_index+11), [qubit_idx]) + k.gate('cw_{}'.format(mwlutman_index+11), [qubit_idx]) if tomo: k.gate(tomo_gate,[qubit_idx]) k.measure(qubit_idx) @@ -1272,7 +1453,7 @@ def idle_error_rate_seq( qubit_idx: int, platf_cfg: str, post_select=True -) -> OqlProgram: + ) -> OqlProgram: """ Sequence to perform the idle_error_rate_sequence. Virtually identical to a T1 experiment (Z-basis) @@ -1345,15 +1526,10 @@ def single_elt_on(qubit_idx: int, platf_cfg: str) -> OqlProgram: def off_on( qubit_idx: int, - pulse_comb: str, - initialize: bool, - platf_cfg: str, - nr_flux_after_init: float=None, - flux_cw_after_init: Union[str, List[str]]=None, - fluxed_qubit_idx: int=None, - wait_time_after_flux: float=0, - cross_driving_qubit: int=None, - ) -> OqlProgram: + pulse_comb: str, + initialize: bool, + platf_cfg: str + ): """ Performs an 'off_on' sequence on the qubit specified. 
@@ -1378,18 +1554,6 @@ def off_on( k.prepz(qubit_idx) if initialize: k.measure(qubit_idx) - - if nr_flux_after_init and flux_cw_after_init: - if fluxed_qubit_idx is None: - fluxed_qubit_idx = qubit_idx - for i in range(int(nr_flux_after_init)): - if type(flux_cw_after_init) == list: - for cw in flux_cw_after_init: - k.gate(cw, [fluxed_qubit_idx]) - else: - k.gate(flux_cw_after_init, [fluxed_qubit_idx]) - k.gate("wait", [], wait_time_after_flux) - k.measure(qubit_idx) p.add_kernel(k) @@ -1398,50 +1562,99 @@ def off_on( k.prepz(qubit_idx) if initialize: k.measure(qubit_idx) - - if nr_flux_after_init and flux_cw_after_init: - if fluxed_qubit_idx is None: - fluxed_qubit_idx = qubit_idx - for i in range(int(nr_flux_after_init)): - if type(flux_cw_after_init) == list: - for cw in flux_cw_after_init: - k.gate(cw, [fluxed_qubit_idx]) - else: - k.gate(flux_cw_after_init, [fluxed_qubit_idx]) - k.gate("wait", [], wait_time_after_flux) - - - # k.gate('rx180', [qubit_idx]) - if cross_driving_qubit is not None: - k.gate('rx180', [cross_driving_qubit]) - k.gate("i", [qubit_idx]) - k.gate("wait", []) - else: - k.gate('rx180', [qubit_idx]) - - k.gate("wait", []) - + k.gate('rx180', [qubit_idx]) k.measure(qubit_idx) p.add_kernel(k) if 'two' in pulse_comb.lower(): k = p.create_kernel("two") k.prepz(qubit_idx) + if initialize: + k.measure(qubit_idx) k.gate('rx180', [qubit_idx]) k.gate('rx12', [qubit_idx]) - k.gate("wait", []) + k.measure(qubit_idx) + p.add_kernel(k) + if 'three' in pulse_comb.lower(): + k = p.create_kernel("three") + k.prepz(qubit_idx) + if initialize: + k.measure(qubit_idx) + k.gate('rx180', [qubit_idx]) + k.gate('rx12', [qubit_idx]) + k.gate('rx23', [qubit_idx]) k.measure(qubit_idx) p.add_kernel(k) - if ('on' not in pulse_comb.lower()) and ('off' not in pulse_comb.lower()) and ('two' not in pulse_comb.lower()): + if ('on' not in pulse_comb.lower()) and ('off' not in pulse_comb.lower())\ + and ('two' not in pulse_comb.lower()) and ('three' not in pulse_comb.lower()): 
raise ValueError(f"pulse_comb {pulse_comb} has to contain only 'on' and 'off'.") p.compile() return p + +def off_on_mw_crosstalk( + qubit_idx: int, + pulse_comb: str, + initialize: bool, + platf_cfg: str, + cross_driving_qubit: int=None, + ): + + """ + Performs an 'off_on' sequence on the qubit specified. + off: (RO) - prepz - - RO + on: (RO) - prepz - x180 - RO + Args: + qubit_idx (int) : + pulse_comb (list): What pulses to play valid options are + "off", "on", "off_on" + initialize (bool): if True does an extra initial measurement to + post select data. + platf_cfg (str) : filepath of OpenQL platform config file + + Pulses can be optionally enabled by putting 'off', respectively 'on' in + the pulse_comb string. + """ + p = OqlProgram('off_on_mw_crosstalk', platf_cfg) + + # # Off + if 'off' in pulse_comb.lower(): + k = p.create_kernel("off") + k.prepz(qubit_idx) + if initialize: + k.measure(qubit_idx) + k.measure(qubit_idx) + p.add_kernel(k) + + if 'on' in pulse_comb.lower(): + k = p.create_kernel("on") + k.prepz(qubit_idx) + if initialize: + k.measure(qubit_idx) + + if cross_driving_qubit is not None: + k.gate('rx180', [cross_driving_qubit]) + k.gate("i", [qubit_idx]) + k.gate("wait", []) + else: + k.gate('rx180', [qubit_idx]) + k.measure(qubit_idx) + p.add_kernel(k) + + if ('on' not in pulse_comb.lower()) and ('off' not in pulse_comb.lower()): + raise ValueError(f"pulse_comb {pulse_comb} has to contain only 'on' and 'off'.") + + p.compile() + return p + + def RO_QND_sequence(q_idx, - platf_cfg: str) -> OqlProgram: + platf_cfg: str, + f_state: bool = False + ) -> OqlProgram: ''' RO QND sequence. 
''' @@ -1449,7 +1662,6 @@ def RO_QND_sequence(q_idx, p = OqlProgram("RO_QND_sequence", platf_cfg) k = p.create_kernel("Experiment") - k.prepz(q_idx) k.gate('rx90', [q_idx]) k.measure(q_idx) @@ -1469,15 +1681,25 @@ def RO_QND_sequence(q_idx, k.measure(q_idx) p.add_kernel(k) + if f_state: + k = p.create_kernel("Init_2") + k.prepz(q_idx) + k.gate('rx180', [q_idx]) + k.gate('rx12', [q_idx]) + k.measure(q_idx) + p.add_kernel(k) + p.compile() return p -def butterfly(qubit_idx: int, initialize: bool, platf_cfg: str) -> OqlProgram: + +def butterfly(qubit_idx: int, f_state: bool, platf_cfg: str) -> OqlProgram: """ Performs a 'butterfly' sequence on the qubit specified. - 0: prepz (RO) - - RO - RO + 0: prepz (RO) - RO - RO 1: prepz (RO) - x180 - RO - RO + 2: prepz (RO) - x180 - rx12 - RO - RO Args: qubit_idx (int) : index of the qubit @@ -1490,21 +1712,48 @@ def butterfly(qubit_idx: int, initialize: bool, platf_cfg: str) -> OqlProgram: k = p.create_kernel('0') k.prepz(qubit_idx) - if initialize: - k.measure(qubit_idx) + k.measure(qubit_idx) k.measure(qubit_idx) k.measure(qubit_idx) p.add_kernel(k) k = p.create_kernel('1') k.prepz(qubit_idx) - if initialize: + k.measure(qubit_idx) + k.gate('rX180',[qubit_idx]) + k.measure(qubit_idx) + k.measure(qubit_idx) + p.add_kernel(k) + + if f_state: + k = p.create_kernel('2') + k.prepz(qubit_idx) k.measure(qubit_idx) - k.x(qubit_idx) + k.gate('rX180',[qubit_idx]) + k.gate('rx12',[qubit_idx]) + k.measure(qubit_idx) + k.measure(qubit_idx) + p.add_kernel(k) + + k = p.create_kernel("Init_0") + k.prepz(qubit_idx) k.measure(qubit_idx) + p.add_kernel(k) + + k = p.create_kernel("Init_1") + k.prepz(qubit_idx) + k.gate('rx180', [qubit_idx]) k.measure(qubit_idx) p.add_kernel(k) + if f_state: + k = p.create_kernel("Init_2") + k.prepz(qubit_idx) + k.gate('rx180', [qubit_idx]) + k.gate('rx12', [qubit_idx]) + k.measure(qubit_idx) + p.add_kernel(k) + p.compile() return p @@ -1516,7 +1765,7 @@ def RTE( platf_cfg: str, net_gate: str, feedback=False -) -> 
OqlProgram: + ) -> OqlProgram: """ Creates a sequence for the rounds to event (RTE) experiment @@ -1586,7 +1835,7 @@ def randomized_benchmarking( program_name: str = 'randomized_benchmarking', cal_points: bool = True, double_curves: bool = False -) -> OqlProgram: + ) -> OqlProgram: ''' Input pars: qubit_idx: int specifying the target qubit (starting at 0) @@ -1648,7 +1897,7 @@ def motzoi_XY( qubit_idx: int, platf_cfg: str, program_name: str = 'motzoi_XY' -) -> OqlProgram: + ) -> OqlProgram: ''' Sequence used for calibrating the motzoi parameter. Consists of yX and xY @@ -1705,7 +1954,7 @@ def FluxTimingCalibration( flux_cw: str = 'fl_cw_02', # FIXME: unused cal_points: bool = True, mw_gate: str = "rx90" -) -> OqlProgram: + ) -> OqlProgram: """ A Ramsey sequence with varying waiting times `times` around a flux pulse. """ @@ -1743,7 +1992,7 @@ def TimingCalibration_1D( platf_cfg: str, # flux_cw: str = 'fl_cw_02', # FIXME: unused cal_points: bool = True -) -> OqlProgram: + ) -> OqlProgram: """ A Ramsey sequence with varying waiting times `times`in between. It calibrates the timing between spec and measurement pulse. 
@@ -1881,9 +2130,10 @@ def ef_rabi_seq( q0: int, amps: list, platf_cfg: str, + measure_3rd_state: bool = False, recovery_pulse: bool = True, add_cal_points: bool = True -) -> OqlProgram: + ) -> OqlProgram: """ Sequence used to calibrate pulses for 2nd excited state (ef/12 transition) @@ -1904,13 +2154,17 @@ def ef_rabi_seq( # These angles correspond to special pi/2 pulses in the lutman for i, amp in enumerate(amps): # cw_idx corresponds to special hardcoded pulses in the lutman - cw_idx = i + 9 + cw_idx = i + 30 k = p.create_kernel("ef_A{}_{}".format(int(abs(1000*amp)),i)) k.prepz(q0) k.gate('rx180', [q0]) + if measure_3rd_state: + k.gate('rx12', [q0]) k.gate('cw_{:02}'.format(cw_idx), [q0]) if recovery_pulse: + if measure_3rd_state: + k.gate('rx12', [q0]) k.gate('rx180', [q0]) k.measure(q0) p.add_kernel(k) @@ -1929,87 +2183,300 @@ def ef_rabi_seq( return p -def Depletion(time, qubit_idx: int, platf_cfg: str, double_points: bool) -> OqlProgram: - """ - Input pars: - times: the list of waiting times for each ALLXY element - qubit_idx: int specifying the target qubit (starting at 0) - platf_cfg: filename of the platform config file - Returns: - p: OpenQL Program object - """ +def LRU_experiment( + qubit_idx: int, + LRU_duration_ns: int, + heralded_init: bool, + platf_cfg: str, + h_state: bool = False) -> OqlProgram: - allXY = [['i', 'i'], ['rx180', 'rx180'], ['ry180', 'ry180'], - ['rx180', 'ry180'], ['ry180', 'rx180'], - ['rx90', 'i'], ['ry90', 'i'], ['rx90', 'ry90'], - ['ry90', 'rx90'], ['rx90', 'ry180'], ['ry90', 'rx180'], - ['rx180', 'ry90'], ['ry180', 'rx90'], ['rx90', 'rx180'], - ['rx180', 'rx90'], ['ry90', 'ry180'], ['ry180', 'ry90'], - ['rx180', 'i'], ['ry180', 'i'], ['rx90', 'rx90'], - ['ry90', 'ry90']] + if LRU_duration_ns%20 >0: + LRU_duration_ns = (LRU_duration_ns//20)*20 + 20 + p = OqlProgram('LRU_experiment', platf_cfg) + # Calibration_points + k = p.create_kernel("cal_0") + k.prepz(qubit_idx) + if heralded_init: + k.measure(qubit_idx) + 
k.measure(qubit_idx) + p.add_kernel(k) - p = OqlProgram('Depletion', platf_cfg) + k = p.create_kernel("cal_1") + k.prepz(qubit_idx) + if heralded_init: + k.measure(qubit_idx) + k.gate('rx180', [qubit_idx]) + k.measure(qubit_idx) + p.add_kernel(k) - if 0: # FIXME: p.set_sweep_points has been replaced by p.sweep_points, since that was missing here they are probably not necessary for this function - p.set_sweep_points(np.arange(len(allXY), dtype=float)) + k = p.create_kernel("cal_2") + k.prepz(qubit_idx) + if heralded_init: + k.measure(qubit_idx) + k.gate('rx180', [qubit_idx]) + k.gate('rx12', [qubit_idx]) + k.measure(qubit_idx) + p.add_kernel(k) - if double_points: - js=2 - else: - js=1 + k = p.create_kernel("cal_LRU") + k.prepz(qubit_idx) + if heralded_init: + k.measure(qubit_idx) + k.gate('rx180', [qubit_idx]) + k.gate('rx12', [qubit_idx]) + k.gate("wait", []) + k.gate('lru', [qubit_idx]) + k.gate("wait", [], LRU_duration_ns-20) + k.measure(qubit_idx) + p.add_kernel(k) - for i, xy in enumerate(allXY): - for j in range(js): - k = p.create_kernel('Depletion_{}_{}'.format(i, j)) - # Prepare qubit - k.prepz(qubit_idx) - # Initial measurement + if h_state: + k = p.create_kernel("cal_3") + k.prepz(qubit_idx) + if heralded_init: k.measure(qubit_idx) - # Wait time - wait_nanoseconds = int(round(time/1e-9)) - k.gate("wait", [qubit_idx], wait_nanoseconds) - # AllXY pulse - k.gate(xy[0], [qubit_idx]) - k.gate(xy[1], [qubit_idx]) - # Final measurement + k.gate('rx180', [qubit_idx]) + k.gate('rx12', [qubit_idx]) + k.gate('rx23', [qubit_idx]) + k.measure(qubit_idx) + p.add_kernel(k) + + k = p.create_kernel("cal_LRU_3rd") + k.prepz(qubit_idx) + if heralded_init: k.measure(qubit_idx) - p.add_kernel(k) + k.gate('rx180', [qubit_idx]) + k.gate('rx12', [qubit_idx]) + k.gate('rx23', [qubit_idx]) + k.gate("wait", []) + k.gate('lru', [qubit_idx]) + k.gate("wait", [], LRU_duration_ns-20) + # k.gate("wait", [], 200) + # k.gate('rx23', [qubit_idx]) + # k.gate("wait", [], 220) + 
k.measure(qubit_idx) + p.add_kernel(k) p.compile() return p -def TEST_RTE( +def LRU_repeated_experiment( qubit_idx: int, + rounds: int, + LRU_duration_ns: int, + heralded_init: bool, platf_cfg: str, - measurements: int) -> OqlProgram: - """ - - """ - p = OqlProgram('RTE', platf_cfg) + h_state: bool=False, + leak_3rd_state: bool = False) -> OqlProgram: + + if LRU_duration_ns%20 >0: + LRU_duration_ns = int((LRU_duration_ns//20)*20 + 20) + p = OqlProgram('LRU_repeated_experiment', platf_cfg) + # Main experiment + k = p.create_kernel("Gate experiment") + k.prepz(qubit_idx) + if heralded_init: + k.measure(qubit_idx) + for i in range(rounds): + k.gate('rx90', [qubit_idx]) + k.gate("wait", []) + if leak_3rd_state: + k.gate('rx12', [qubit_idx]) + k.gate('cw_10', [qubit_idx]) + if leak_3rd_state: + k.gate('rx12', [qubit_idx]) + k.measure(qubit_idx) + k.gate("wait", []) + # k.gate('lru', [qubit_idx]) # X3 + # k.gate('lru', [12]) # Z3 + # k.gate('lru', [10]) # Z4 + # k.gate('lru', [9]) # X4 + # k.gate('lru', [7]) # Z1 + # k.gate('lru', [14]) # Z2 + # LRU Data qubits + k.gate('lru', [13]) # D5 + k.gate('lru', [15]) # D4 + k.gate('lru', [16]) # D6 + k.gate("wait", [21, 22, 23, 24], LRU_duration_ns-40) + k.gate("wait", [17, 18, 19], LRU_duration_ns-20) + k.gate('update_ph_LRU', [qubit_idx]) + # for q in [6,2,0,15,13,16,1,5,4]: + # # for q in [6,2,0,15,13,1,5,4]: + # k.gate("i", [q]) + # k.gate("i", [q]) + # k.gate("i", [q]) + # k.gate("i", [q]) + # k.gate("i", [q]) + # k.gate('rX180', [q]) + # k.gate("i", [q]) + # k.gate("i", [q]) + # k.gate("i", [q]) + # k.gate("i", [q]) + # k.gate("i", [q]) + k.gate("wait", []) - k = p.create_kernel('RTE') + p.add_kernel(k) + # Main experiment + k = p.create_kernel("Reference_experiment") k.prepz(qubit_idx) - ###################### - # Parity check - ###################### - for m in range(measurements): - # Superposition + if heralded_init: + k.measure(qubit_idx) + for i in range(rounds): k.gate('rx90', [qubit_idx]) - # CZ emulation - 
k.gate('i', [qubit_idx]) - k.gate('i', [qubit_idx]) - k.gate('i', [qubit_idx]) - # Refocus + if leak_3rd_state: + k.gate('rx12', [qubit_idx]) + k.gate('cw_10', [qubit_idx]) + if leak_3rd_state: + k.gate('rx12', [qubit_idx]) + k.measure(qubit_idx) + k.gate("wait", []) + # for q in [6,2,0,15,13,16,1,5,4]: + # for q in [6,2,0,15,13,1,5,4]: + # k.gate("i", [q]) + # k.gate("i", [q]) + # k.gate("i", [q]) + # k.gate("i", [q]) + # k.gate("i", [q]) + # k.gate('rX180', [q]) + # k.gate("i", [q]) + # k.gate("i", [q]) + # k.gate("i", [q]) + # k.gate("i", [q]) + # k.gate("i", [q]) + k.gate("wait", []) + # k.gate("wait", [], LRU_duration_ns) + p.add_kernel(k) + # Calibration_points + k = p.create_kernel("cal_0") + k.prepz(qubit_idx) + if heralded_init: + k.measure(qubit_idx) + k.measure(qubit_idx) + p.add_kernel(k) + + k = p.create_kernel("cal_1") + k.prepz(qubit_idx) + if heralded_init: + k.measure(qubit_idx) + k.gate('rx180', [qubit_idx]) + k.measure(qubit_idx) + p.add_kernel(k) + + k = p.create_kernel("cal_2") + k.prepz(qubit_idx) + if heralded_init: + k.measure(qubit_idx) + k.gate('rx180', [qubit_idx]) + k.gate('rx12', [qubit_idx]) + k.measure(qubit_idx) + p.add_kernel(k) + + k = p.create_kernel("cal_LRU") + k.prepz(qubit_idx) + if heralded_init: + k.measure(qubit_idx) + k.gate('rx180', [qubit_idx]) + k.gate('rx12', [qubit_idx]) + k.gate("wait", []) + k.gate('lru', [qubit_idx]) + k.gate("wait", [], LRU_duration_ns-20) + k.measure(qubit_idx) + p.add_kernel(k) + + if h_state: + k = p.create_kernel("cal_3") + k.prepz(qubit_idx) + if heralded_init: + k.measure(qubit_idx) k.gate('rx180', [qubit_idx]) - # CZ emulation - k.gate('i', [qubit_idx]) - k.gate('i', [qubit_idx]) - k.gate('i', [qubit_idx]) - # Recovery pulse - k.gate('rx90', [qubit_idx]) + k.gate('rx12', [qubit_idx]) + k.gate('rx23', [qubit_idx]) k.measure(qubit_idx) + p.add_kernel(k) + # Compile + p.compile() + + return p + + +def LRU_process_tomograhpy( + qubit_idx: int, + LRU_duration_ns: int, + platf_cfg: str, + idle: 
bool = False): + if LRU_duration_ns%20 >0: + LRU_duration_ns = (LRU_duration_ns//20)*20 + 20 + states = {'0': 'i', + '1': 'rx180', + 'p': 'ry90', + 'm': 'rym90', + 'pi': 'rxm90', + 'mi': 'rx90'} + meas_bases = {'Z':'i', + 'X':'rym90', + 'Y':'rx90'} + p = OqlProgram('LRU_process_tomo', platf_cfg) + for state in states.keys(): + for basis in meas_bases.keys(): + k = p.create_kernel(f'state_{state}_tomo_{basis}') + # State preparation + k.prepz(qubit_idx) + k.gate(states[state], [qubit_idx]) + # LRU gate + k.gate('wait', [], 0) + if idle: + k.gate('wait', [], 20) + else: + # # Single qubit LRU assessment + # k.gate('lru', [qubit_idx]) + # # k.gate('update_ph_LRU', [qubit_idx]) + # k.gate('wait', [], LRU_duration_ns-20) + # Multi qubit LRUs + k.gate('lru', [13]) # D5 + k.gate('lru', [15]) # D4 + k.gate('lru', [16]) # D6 + # k.gate('lru', [12]) # Z3 + # k.gate('lru', [10]) # Z4 + # k.gate('lru', [7]) # Z1 + # k.gate('lru', [14]) # Z2 + # k.gate('lru', [3]) # X3 + # k.gate('lru', [9]) # X4 + k.gate('wait', [20, 21, 22, 23, 24, 25], LRU_duration_ns-40) + k.gate('wait', [17, 18, 19], LRU_duration_ns-20) + k.gate('update_ph_LRU', [qubit_idx]) + # k.gate("wait", [], ) + k.gate('wait', [], 20) + # Measurement in basis + k.gate(meas_bases[basis], [qubit_idx]) + k.measure(qubit_idx) + p.add_kernel(k) + # Calibration_points + k = p.create_kernel("cal_0") + k.prepz(qubit_idx) + k.measure(qubit_idx) + p.add_kernel(k) + k = p.create_kernel("cal_1") + k.prepz(qubit_idx) + k.gate('rx180', [qubit_idx]) + k.measure(qubit_idx) + p.add_kernel(k) + k = p.create_kernel("cal_2") + k.prepz(qubit_idx) + k.gate('rx180', [qubit_idx]) + k.gate('rx12', [qubit_idx]) + k.measure(qubit_idx) + p.add_kernel(k) + k = p.create_kernel("cal_LRU") + k.prepz(qubit_idx) + k.gate('rx180', [qubit_idx]) + k.gate('rx12', [qubit_idx]) + k.gate("wait", []) + k.gate('lru', [qubit_idx]) + k.gate('wait', [], LRU_duration_ns-20) + k.measure(qubit_idx) p.add_kernel(k) + # Compile p.compile() return p diff --git 
a/pycqed/measurement/sweep_functions.py b/pycqed/measurement/sweep_functions.py index 60157057c9..771c8d5d54 100644 --- a/pycqed/measurement/sweep_functions.py +++ b/pycqed/measurement/sweep_functions.py @@ -95,11 +95,10 @@ class Elapsed_Time_Sweep(Soft_Sweep): def __init__(self, sweep_control='soft', as_fast_as_possible: bool=False, **kw): super().__init__() + self.sweep_control = sweep_control self.name = 'Elapsed_Time_Sweep' self.parameter_name = 'Time' self.unit = 's' - self.sweep_control = sweep_control - self.as_fast_as_possible = as_fast_as_possible self.time_first_set = None @@ -151,11 +150,10 @@ def __init__(self, """ super(Heterodyne_Frequency_Sweep, self).__init__() + self.sweep_control = sweep_control self.name = 'Heterodyne frequency' self.parameter_name = 'Frequency' self.unit = 'Hz' - self.sweep_control = sweep_control - self.RO_pulse_type = RO_pulse_type self.sweep_points = sweep_points self.LO_source = LO_source @@ -183,7 +181,6 @@ def __init__(self, MW_LO_source, IF, self.parameter_name = 'Frequency' self.unit = 'Hz' self.sweep_points = sweep_points - self.MW_LO_source = MW_LO_source self.IF = IF @@ -201,10 +198,10 @@ def __init__(self, sweep_control='soft', sweep_points=None, unit: str='arb. unit', **kw): super(None_Sweep, self).__init__() + self.sweep_control = sweep_control self.name = name self.parameter_name = parameter_name self.unit = unit - self.sweep_control = sweep_control self.sweep_points = sweep_points def set_parameter(self, val): @@ -222,8 +219,8 @@ def __init__(self, sweep_control='soft', sweep_points=None, unit: str='arb. 
unit', **kw): super().__init__() - self.name = name self.sweep_control = sweep_control + self.name = name self.parameter_name = parameter_name self.unit = unit self.sweep_points = sweep_points @@ -252,11 +249,10 @@ class Delayed_None_Sweep(Soft_Sweep): def __init__(self, sweep_control='soft', delay=0, **kw): super().__init__() + self.sweep_control = sweep_control self.name = 'None_Sweep' self.parameter_name = 'pts' self.unit = 'arb. unit' - self.sweep_control = sweep_control - self.delay = delay self.time_last_set = 0 if delay > 60: @@ -278,11 +274,10 @@ class AWG_amp(Soft_Sweep): def __init__(self, channel, AWG): super().__init__() self.name = 'AWG Channel Amplitude' - self.parameter_name = 'AWG_ch{}_amp'.format(channel) - self.unit = 'V' - self.channel = channel + self.parameter_name = 'AWG_ch{}_amp'.format(channel) self.AWG = AWG + self.unit = 'V' def prepare(self): pass @@ -309,7 +304,6 @@ def __init__(self, AWG, channels, delay=0, **kw): self.name = 'AWG channel amplitude chs %s' % channels self.parameter_name = 'AWG chs %s' % channels self.unit = 'V' - self.AWG = AWG self.channels = channels self.delay = delay @@ -319,20 +313,18 @@ def set_parameter(self, val): self.AWG.set('ch{}_amp'.format(ch), val) time.sleep(self.delay) - class mw_lutman_amp_sweep(Soft_Sweep): """ """ def __init__(self,qubits,device): super().__init__() + self.device = device self.name = 'mw_lutman_amp_sweep' + self.qubits = qubits self.parameter_name = 'mw_amp' self.unit = 'a.u.' - self.device = device - self.qubits = qubits - def set_parameter(self, val): for q in self.qubits: qub = self.device.find_instrument(q) @@ -346,19 +338,197 @@ class motzoi_lutman_amp_sweep(Soft_Sweep): def __init__(self,qubits,device): super().__init__() + self.device = device self.name = 'motzoi_lutman_amp_sweep' + self.qubits = qubits self.parameter_name = 'motzoi_amp' self.unit = 'a.u.' 
- self.device = device - self.qubits = qubits - def set_parameter(self, val): for q in self.qubits: qub = self.device.find_instrument(q) mw_lutman = qub.instr_LutMan_MW.get_instr() mw_lutman.mw_motzoi(val) - mw_lutman.load_waveforms_onto_AWG_lookuptable(regenerate_waveforms=True) + mw_lutman.load_waveforms_onto_AWG_lookuptable( + regenerate_waveforms=True) + +############################################################################### +#################### Hardware Sweeps ############################ +############################################################################### + + +class Hard_Sweep(Sweep_function): + + def __init__(self, **kw): + super(Hard_Sweep, self).__init__() + self.sweep_control = 'hard' + self.parameter_name = 'None' + self.name = 'Hard_Sweep' + self.unit = 'a.u.' + + def start_acquistion(self): + pass + + +class OpenQL_Sweep(Hard_Sweep): + + def __init__(self, openql_program, CCL, + parameter_name: str ='Points', unit: str='a.u.', + upload: bool=True): + super().__init__() + self.name = 'OpenQL_Sweep' + self.openql_program = openql_program + self.CCL = CCL + self.upload = upload + self.parameter_name = parameter_name + self.unit = unit + + def prepare(self, **kw): + if self.upload: + self.CCL.eqasm_program(self.openql_program.filename) + + +class OpenQL_File_Sweep(Hard_Sweep): + + def __init__(self, filename: str, CCL, + parameter_name: str ='Points', unit: str='a.u.', + upload: bool=True): + super().__init__() + self.name = 'OpenQL_Sweep' + self.filename = filename + self.CCL = CCL + self.upload = upload + self.parameter_name = parameter_name + self.unit = unit + + def prepare(self, **kw): + if self.upload: + self.CCL.eqasm_program(self.filename) + + +class ZNB_VNA_sweep(Hard_Sweep): + + def __init__(self, VNA, + start_freq=None, stop_freq=None, + center_freq=None, span=None, + segment_list=None, + npts=100, force_reset=False): + ''' + Frequencies are in Hz. 
+ Defines the frequency sweep using one of the following methods: + 1) start a and stop frequency + 2) center frequency and span + 3) segment sweep (this requires a list of elements. Each element fully + defines a sweep) + segment_list = [[start_frequency, stop_frequency, nbr_points, + power, segment_time, mesurement_delay, bandwidth], + [elements for segment #2], + ..., + [elements for segment #n]] + + If force_reset = True the VNA is reset to default settings + ''' + super(ZNB_VNA_sweep, self).__init__() + self.VNA = VNA + self.name = 'ZNB_VNA_sweep' + self.parameter_name = 'frequency' + self.unit = 'Hz' + self.filename = 'VNA_sweep' + + self.start_freq = start_freq + self.stop_freq = stop_freq + self.center_freq = center_freq + self.segment_list = segment_list + self.span = span + self.npts = npts + + if force_reset == True: + VNA.reset() + + def prepare(self): + ''' + Prepare the VNA for measurements by defining basic settings. + Set the frequency sweep and get the frequency points back from the insturment + ''' + self.VNA.continuous_mode_all('off') # measure only if required + # optimize the sweep time for the fastest measurement + self.VNA.min_sweep_time('on') + # start a measurement once the trigger signal arrives + self.VNA.trigger_source('immediate') + # trigger signal is generated with the command: + # VNA.start_sweep_all() + self.VNA.rf_on() + if self.segment_list == None: + self.VNA.sweep_type('linear') # set a linear sweep + if self.start_freq != None and self.stop_freq != None: + self.VNA.start_frequency(self.start_freq) + self.VNA.stop_frequency(self.stop_freq) + elif self.center_freq != None and self.span != None: + self.VNA.center_frequency(self.center_freq) + self.VNA.span_frequency(self.span) + + self.VNA.npts(self.npts) + elif self.segment_list != None: + # delete all previous stored segments + self.VNA.delete_all_segments() + + # Load segments in reverse order to have them executed properly + for idx_segment in range(len(self.segment_list), 0, 
-1): + current_segment = self.segment_list[idx_segment-1] + str_to_write = 'SENSE:SEGMENT:INSERT %s, %s, %s, %s, %s, %s, %s' % (current_segment[0], current_segment[ + 1], current_segment[2], current_segment[3], current_segment[4], current_segment[5], current_segment[6]) + self.VNA.write(str_to_write) + + self.VNA.sweep_type('segment') # set a segment sweep + + # get the list of frequency used in the span from the VNA + self.sweep_points = self.VNA.get_stimulus() + + def finish(self, **kw): + self.VNA.rf_off() + + +class QWG_lutman_par(Soft_Sweep): + + def __init__(self, LutMan, LutMan_parameter, **kw): + self.set_kw() + self.name = LutMan_parameter.name + self.parameter_name = LutMan_parameter.label + self.unit = LutMan_parameter.unit + self.sweep_control = 'soft' + self.LutMan = LutMan + self.LutMan_parameter = LutMan_parameter + + def set_parameter(self, val): + self.LutMan.AWG.get_instr().stop() + self.LutMan_parameter.set(val) + self.LutMan.load_waveforms_onto_AWG_lookuptable(regenerate_waveforms=True) + self.LutMan.AWG.get_instr().start() + self.LutMan.AWG.get_instr().getOperationComplete() + + +class QWG_flux_amp(Soft_Sweep): + """ + Sweep function + """ + + def __init__(self, QWG, channel: int, frac_amp: float, **kw): + self.set_kw() + self.QWG = QWG + self.qwg_channel_amp_par = QWG.parameters['ch{}_amp'.format(channel)] + self.name = 'Flux_amp' + self.parameter_name = 'Flux_amp' + self.unit = 'V' + self.sweep_control = 'soft' + + # Amp = frac * Vpp/2 + self.scale_factor = 2/frac_amp + + def set_parameter(self, val): + Vpp = val * self.scale_factor + self.qwg_channel_amp_par(Vpp) + # Ensure the amplitude was set correctly + self.QWG.getOperationComplete() class lutman_par(Soft_Sweep): @@ -367,23 +537,19 @@ class lutman_par(Soft_Sweep): supported) """ - def __init__( - self, - LutMan: Base_LutMan, - LutMan_parameter - ): + def __init__(self, LutMan, LutMan_parameter): self.set_kw() self.name = LutMan_parameter.name self.parameter_name = LutMan_parameter.label 
self.unit = LutMan_parameter.unit self.sweep_control = 'soft' - self.LutMan = LutMan self.LutMan_parameter = LutMan_parameter def set_parameter(self, val): self.LutMan_parameter.set(val) - self.LutMan.load_waveforms_onto_AWG_lookuptable(regenerate_waveforms=True) + self.LutMan.load_waveforms_onto_AWG_lookuptable( + regenerate_waveforms=True) class anharmonicity_sweep(Soft_Sweep): @@ -398,17 +564,15 @@ def __init__(self, qubit, amps): self.parameter_name = qubit.anharmonicity.label self.unit = qubit.anharmonicity.unit self.sweep_control = 'soft' - self.qubit = qubit self.amps = amps def set_parameter(self, val): - self.qubit.anharmonicity.set(val) # _prep_mw_pulses will upload anharmonicity val to LutMan - self.qubit._prep_mw_pulses() # and we regenerate the waveform with that new modulation mw_lutman = self.qubit.instr_LutMan_MW.get_instr() - mw_lutman.load_ef_rabi_pulses_to_AWG_lookuptable(amps=self.amps) + mw_lutman.load_ef_rabi_pulses_to_AWG_lookuptable(amps=self.amps, + mod_freqs=[val]) class joint_HDAWG_lutman_parameters(Soft_Sweep): @@ -430,10 +594,9 @@ def __init__( self.name = name self.parameter_name = parameter_1.label self.unit = parameter_1.unit - self.sweep_control = 'soft' - self.lm = lutman self.AWG = AWG + self.sweep_control = 'soft' self.parameter_1 = parameter_1 self.parameter_2 = parameter_2 @@ -451,28 +614,23 @@ class RO_freq_sweep(Soft_Sweep): label and units are grabbed from parameter_1 """ - def __init__(self, name, qubit, ro_lutman, idx, parameter): + def __init__(self, qubit): self.set_kw() - self.name = name - self.parameter_name = parameter.label - self.unit = parameter.unit + self.name = 'Readout_frequency_sweep' + self.parameter_name = 'Readout_frequency' + self.unit = 'Hz' self.sweep_control = 'soft' - self.qubit = qubit - self.ro_lm = ro_lutman - self.idx = idx + self.ro_lm = qubit.instr_LutMan_RO.get_instr() + self.q_idx = qubit.cfg_qubit_nr() def set_parameter(self, val): LO_freq = self.ro_lm.LO_freq() IF_freq = val - LO_freq - - # 
Parameter 1 will be qubit.ro_freq() - # self.qubit.ro_freq.set(val) - # Parameter 2 will be qubit.ro_freq_mod() - self.qubit.ro_freq_mod.set(IF_freq) - - # self.ro_lm.set('M_modulation_R{}'.format(self.idx), IF_freq) - # self.ro_lm.load_waveforms_onto_AWG_lookuptable() + self.qubit.ro_freq_mod(IF_freq) + self.ro_lm.set('M_modulation_R{}'.format(self.q_idx), IF_freq) + self.ro_lm.load_DIO_triggered_sequence_onto_UHFQC() + self.qubit._prep_ro_integration_weights() @deprecated(version='0.4', reason='not used within pyqed') @@ -491,7 +649,6 @@ def __init__(self, LutMan, LutMan_parameter, self.parameter_name = LutMan_parameter.label self.unit = LutMan_parameter.unit self.sweep_points = sweep_points - self.chunk_size = chunk_size self.LutMan = LutMan self.LutMan_parameter = LutMan_parameter @@ -553,10 +710,6 @@ def __init__(self, LutMan, wave_func, sweep_points, chunk_size, param_unit='a.u.', **kw): super().__init__(**kw) - self.name = param_name - self.parameter_name = param_name - self.unit = param_unit - self.wave_func = wave_func self.chunk_size = chunk_size self.LutMan = LutMan @@ -564,6 +717,9 @@ def __init__(self, LutMan, wave_func, sweep_points, chunk_size, self.codewords = np.arange(chunk_size) else: self.codewords = codewords + self.name = param_name + self.parameter_name = param_name + self.unit = param_unit # Setting self.custom_swp_pts because self.sweep_points is overwritten # by the MC. 
self.custom_swp_pts = sweep_points @@ -594,7 +750,6 @@ def __init__(self, LutMan, LutMan_parameter, **kw): self.parameter_name = LutMan_parameter.label self.unit = 'dB' self.sweep_control = 'soft' - self.LutMan = LutMan self.LutMan_parameter = LutMan_parameter @@ -614,7 +769,6 @@ def __init__(self, LutMan, LutMan_parameter, run=False, single=True,**kw): self.parameter_name = LutMan_parameter.label self.unit = 'dB' self.sweep_control = 'soft' - self.LutMan = LutMan self.LutMan_parameter = LutMan_parameter self.run=run @@ -637,7 +791,6 @@ def __init__(self, UHFQC, **kw): self.parameter_name = "UHFQC attenuation" self.unit = 'dB' self.sweep_control = 'soft' - self.UHFQC = UHFQC # def set_parameter(self, val): @@ -659,7 +812,6 @@ def __init__( self.parameter_name = LutMan_parameter.label self.unit = LutMan_parameter.unit self.sweep_control = 'soft' - self.LutMan = LutMan self.LutMan_parameter = LutMan_parameter self.run = run @@ -693,7 +845,6 @@ def __init__( self.parameter_name = 'relative_depletion_pulse_scaling_amp' self.unit = 'a.u.' self.sweep_control = 'soft' - self.LutMan = LutMan self.optimization_M_amps = optimization_M_amps self.optimization_M_amp_down0s = optimization_M_amp_down0s @@ -733,15 +884,14 @@ def __init__( self.set_kw() self.name = LutMan_parameter.name self.parameter_name = LutMan_parameter.label - self.unit = 'dB' + self.unit = 'a.u.' 
self.sweep_control = 'soft' - self.LutMan = LutMan self.LutMan_parameter = LutMan_parameter self.run = run def set_parameter(self, val): - self.LutMan_parameter.set(10**(val/20)) + self.LutMan_parameter.set(val) if self.run: self.LutMan.AWG.get_instr().awgs_0_enable(False) @@ -774,7 +924,6 @@ def __init__( self.parameter_name = LutMan_parameter.label self.unit = 'dB' self.sweep_control = 'soft' - self.LutMan = LutMan self.LutMan_parameter = LutMan_parameter self.run = run @@ -803,7 +952,6 @@ def __init__( self.parameter_name = 'pulse attenuation' self.unit = 'dB' self.sweep_control = 'soft' - self.UHFQC = UHFQC self.dig_trigger = dig_trigger self.IF = IF @@ -833,12 +981,11 @@ def __init__( **kw ): self.set_kw() - self.name = name or 'multi_sweep' - self.parameter_name = parameter_name or 'multiple_parameters' - self.unit = sweep_functions[0].unit self.sweep_functions = sweep_functions self.sweep_control = 'soft' - + self.name = name or 'multi_sweep' + self.unit = sweep_functions[0].unit + self.parameter_name = parameter_name or 'multiple_parameters' self.sweep_point_ratios = sweep_point_ratios for i, sweep_function in enumerate(sweep_functions): if self.unit.lower() != sweep_function.unit.lower(): @@ -862,12 +1009,11 @@ class multi_sweep_function_ranges(Soft_Sweep): def __init__(self, sweep_functions: list, sweep_ranges: list, n_points: int, parameter_name=None, name=None,**kw): self.set_kw() - self.name = name or 'multi_sweep' - self.parameter_name = parameter_name or 'multiple_parameters' - self.unit = sweep_functions[0].unit self.sweep_functions = sweep_functions self.sweep_control = 'soft' - + self.name = name or 'multi_sweep' + self.unit = sweep_functions[0].unit + self.parameter_name = parameter_name or 'multiple_parameters' self.sweep_ranges = sweep_ranges self.n_points = n_points for i, sweep_function in enumerate(sweep_functions): @@ -891,13 +1037,12 @@ class two_par_joint_sweep(Soft_Sweep): def __init__(self, par_A, par_B, preserve_ratio: bool=True, 
retrieve_value=False, instr=None, **kw): self.set_kw() - self.name = par_A.name - self.parameter_name = par_A.name - self.sweep_control = 'soft' - self.unit = par_A.unit + self.sweep_control = 'soft' self.par_A = par_A self.par_B = par_B + self.name = par_A.name + self.parameter_name = par_A.name self.retrieve_value = retrieve_value self.instr=instr if preserve_ratio: @@ -931,13 +1076,12 @@ def __init__( bypass_waveform_upload: bool=False ): super().__init__() - self.name = par.name - self.parameter_name = par.name - self.unit = par.unit - self.lm = lm self.par = par self.waveform_name = waveform_name + self.parameter_name = par.name + self.unit = par.unit + self.name = par.name self.amp_for_generation = amp_for_generation self.upload_waveforms_always = upload_waveforms_always self.bypass_waveform_upload = bypass_waveform_upload @@ -945,7 +1089,6 @@ def __init__( self.AWG = self.lm.AWG.get_instr() self.awg_model_QWG = self.AWG.IDN()['model'] == 'QWG' # FIXME: use class name instead of asking instrument - # FIXME: move to HAL def set_parameter(self, val): # Just in case there is some resolution or number precision differences # when setting the value @@ -979,39 +1122,47 @@ def set_parameter_QWG(self, val): self.AWG.start() return - class flux_t_middle_sweep(Soft_Sweep): + def __init__( self, fl_lm_tm: List[Base_Flux_LutMan], fl_lm_park: List[Base_Flux_LutMan], which_gate: List[str], t_pulse: List[float], - duration: float = 40e-9 + duration: float = 40e-9, + time_park: float = None, ): super().__init__() self.name = 'time_middle' self.parameter_name = 'time_middle' self.unit = 's' - self.fl_lm_tm = fl_lm_tm self.fl_lm_park = fl_lm_park self.which_gate = which_gate self.t_pulse = t_pulse self.duration = duration + self.time_park = time_park def set_parameter(self, val): which_gate = self.which_gate t_pulse = np.repeat(self.t_pulse, 2) sampling_rate = self.fl_lm_tm[0].sampling_rate() total_points = self.duration*sampling_rate + # Calculate vcz times for each flux 
pulse time_mid = val / sampling_rate n_points = [ np.ceil(tp / 2 * sampling_rate) for tp in t_pulse ] + if self.time_park == None: + n_time_park = np.max(n_points)*2 + val + 4 + else: + n_time_park = np.ceil(self.time_park*sampling_rate) + n_time_park_pad = np.ceil((total_points-n_time_park)/2) + n_time_pad = np.ceil((total_points-(np.max(n_points)*2 + val + 4))/2) time_sq = [ n / sampling_rate for n in n_points ] - time_park= np.max(time_sq)*2 + time_mid + 4/sampling_rate - time_park_pad = np.ceil((self.duration-time_park)/2*sampling_rate)/sampling_rate - time_pad = np.abs(np.array(time_sq)-np.max(time_sq))+time_park_pad + time_park = n_time_park/sampling_rate + time_park_pad = n_time_park_pad/sampling_rate + time_pad = np.abs(np.array(time_sq)-np.max(time_sq))+n_time_pad/sampling_rate # update parameters and upload waveforms Lutmans = self.fl_lm_tm + self.fl_lm_park AWGs = np.unique([lm.AWG() for lm in Lutmans]) @@ -1022,7 +1173,6 @@ def set_parameter(self, val): fl_lm.set('vcz_time_single_sq_{}'.format(which_gate[i]), time_sq[i]) fl_lm.set('vcz_time_middle_{}'.format(which_gate[i]), time_mid) fl_lm.set('vcz_time_pad_{}'.format(which_gate[i]), time_pad[i]) - fl_lm.set('vcz_amp_fine_{}'.format(which_gate[i]), .5) fl_lm.load_waveform_onto_AWG_lookuptable( wave_id=f'cz_{which_gate[i]}', regenerate_waveforms=True) # set flux lutman parameters of Park qubits @@ -1037,6 +1187,44 @@ def set_parameter(self, val): return val +class flux_make_pulse_netzero(Soft_Sweep): + + def __init__(self, + flux_lutman, + wave_id: str + ): + super().__init__() + self.name = 'amp_pad_samples' + self.parameter_name = 'amp_pad_samples' + self.unit = 'samples' + self.flux_lutman = flux_lutman + self.wave_id = wave_id + + def set_parameter(self, val): + # Look for waveform + flux_lutman = self.flux_lutman + dirct = self.wave_id.split('_')[-1] + # generate unpadded waveform + flux_lutman.set(f'vcz_amp_pad_{dirct}', 0) + flux_lutman.generate_standard_waveforms() + wf = 
flux_lutman._wave_dict[self.wave_id] + flux_lutman.set(f'vcz_amp_pad_samples_{dirct}', val) + n_samples = val + # Set amplitude of padding to achieve net-zeroness + net_area = np.trapz(wf)*1/2.4e9 + time_pad = (flux_lutman.get(f'vcz_time_pad_{dirct}') - n_samples/2.4e9)*2 + amp_pad = -(net_area)/time_pad + # update parameters and upload waveforms + AWG = flux_lutman.AWG() + flux_lutman.find_instrument(AWG).stop() + # set flux lutman parameters of CZ qubits + flux_lutman.set(f'vcz_amp_pad_{dirct}', amp_pad) + flux_lutman.load_waveform_onto_AWG_lookuptable( + wave_id=self.wave_id, regenerate_waveforms=True) + flux_lutman.find_instrument(AWG).start() + return val + + class Nested_resonator_tracker(Soft_Sweep): """ Sets a parameter and performs a "find_resonator_frequency" measurement @@ -1055,14 +1243,13 @@ def __init__( **kw ): super().__init__(**kw) - self.name = par.name - self.parameter_name = par.name - self.unit = par.unit - self.qubit = qubit self.freqs = freqs self.par = par self.nested_MC = nested_MC + self.parameter_name = par.name + self.unit = par.unit + self.name = par.name self.reload_marked_sequence = reload_sequence self.sequence_file = sequence_file self.cc = cc @@ -1074,6 +1261,7 @@ def set_parameter(self, val): freqs=self.freqs, MC=self.nested_MC, use_min=self.use_min) self.qubit._prep_ro_sources() + self.qubit.prepare_for_continuous_wave() if self.reload_marked_sequence: # reload the meaningfull sequence self.cc.stop() @@ -1104,11 +1292,7 @@ def __init__( self.name = par.name self.parameter_name = par.name self.unit = par.unit - self.qubit = qubit - # FIXME: commented out unused attributes, cleanup parameters - # self.par = par - # self.nested_MC = nested_MC self.reload_marked_sequence = reload_sequence self.sequence_file = sequence_file self.cc = cc @@ -1149,8 +1333,6 @@ def __init__( self.reload_marked_sequence = reload_sequence self.sequence_file = sequence_file self.cc = cc - # FIXME: commented out unused attributes, cleanup parameters - # 
self.nested_MC = nested_MC def set_parameter(self, val): self.par(val) @@ -1164,12 +1346,11 @@ def set_parameter(self, val): class tim_flux_latency_sweep(Soft_Sweep): def __init__(self, device): super().__init__() + self.dev = device self.name = 'Flux latency' self.parameter_name = 'Flux latency' self.unit = 's' - self.dev = device - def set_parameter(self, val): # FIXME: use HAL, or _NUM_INSTR_AWG_FLUX self.dev.tim_flux_latency_0(val) @@ -1184,12 +1365,11 @@ def set_parameter(self, val): class tim_ro_latency_sweep(Soft_Sweep): def __init__(self, device): super().__init__() + self.dev = device self.name = 'RO latency' self.parameter_name = 'RO latency' self.unit = 's' - self.dev = device - def set_parameter(self, val): # FIXME: use HAL, or _NUM_INSTR_ACQ self.dev.tim_ro_latency_0(val) @@ -1203,12 +1383,11 @@ def set_parameter(self, val): class tim_mw_latency_sweep(Soft_Sweep): def __init__(self, device): super().__init__() + self.dev = device self.name = 'MW latency' self.parameter_name = 'MW latency' self.unit = 's' - self.dev = device - def set_parameter(self, val): # FIXME: use HAL, or _NUM_INSTR_AWG_MW self.dev.tim_mw_latency_0(val) @@ -1225,12 +1404,11 @@ def set_parameter(self, val): class tim_mw_latency_sweep_1D(Soft_Sweep): def __init__(self, device): super().__init__() + self.dev = device self.name = 'MW latency' self.parameter_name = 'MW latency' self.unit = 's' - self.dev = device - def set_parameter(self, val): # FIXME: use HAL self.dev.tim_mw_latency_0(val) @@ -1246,12 +1424,11 @@ class SweepAlong2DContour(Soft_Sweep): """ def __init__(self, par_A, par_B, contour_pnts, interp_kw: dict = {}): super().__init__() + self.par_A = par_A + self.par_B = par_B self.name = 'Contour sweep' self.parameter_name = 'Contour sweep' self.unit = 'a.u.' 
- - self.par_A = par_A - self.par_B = par_B self.interpolator = c2d.interp_2D_contour(contour_pnts, **interp_kw) def set_parameter(self, val): @@ -1443,3 +1620,18 @@ def set_parameter(self, val): self.QWG.getOperationComplete() + + +class LRU_freq_sweep(Soft_Sweep): + def __init__(self, qubit): + super().__init__() + self.qubit = qubit + self.LRU_LO = qubit.instr_LO_LRU.get_instr() + self.name = 'LRU pulse frequency' + self.parameter_name = 'LRU pulse frequency' + self.unit = 'Hz' + + def set_parameter(self, val): + LO_freq = val - self.qubit.LRU_freq_mod() + self.LRU_LO.frequency(LO_freq) + return val \ No newline at end of file diff --git a/pycqed/measurement/waveform_control_CC/waveform.py b/pycqed/measurement/waveform_control_CC/waveform.py index c195647456..707eff5237 100644 --- a/pycqed/measurement/waveform_control_CC/waveform.py +++ b/pycqed/measurement/waveform_control_CC/waveform.py @@ -113,7 +113,8 @@ def gauss_pulse( return pulse_I, pulse_Q -def single_channel_block(amp, length, sampling_rate=2e8, delay=0): +def single_channel_block(amp, length, sampling_rate=2e8, delay=0, + gauss_sigma=0): ''' Generates a block pulse. 
amp in V @@ -128,6 +129,15 @@ def single_channel_block(amp, length, sampling_rate=2e8, delay=0): block = amp * np.ones(int(pulse_samples)) Zeros = np.zeros(int(delay_samples)) pulse = np.array(list(Zeros) + list(block)) + + # # Added for gaussian convolution (Jorge/Hany) + # from scipy.ndimage import gaussian_filter1d + # gauss_samples = int(np.round(gauss_sigma * sampling_rate)) + # padding_samples = 144-len(pulse) + # # padding_samples = int(np.round((length + 200e-9) * sampling_rate)) # 144-len(pulse) + # pulse = np.array(list(pulse)+list(np.zeros(padding_samples))) + # if gauss_samples > 0: + # pulse = gaussian_filter1d(pulse, sigma=gauss_samples) return pulse @@ -365,3 +375,29 @@ def mod_square_VSM( D_I_mod, D_Q_mod = mod_pulse(D_I, D_Q, f_modulation, sampling_rate=sampling_rate) return G_I_mod, G_Q_mod, D_I_mod, D_Q_mod + + +def mod_lru_pulse(t_total, t_rise, + f_modulation, amplitude, + sampling_rate = 2.4e9): + ''' + Leakage reduction unit waveform. + ''' + # Nr of sampling points alocated for each + # part of the pulse. 
+ n_total = int(t_total*sampling_rate ) + n_rise = int(t_rise*sampling_rate) + # Rise part of waveform + _x_rise = np.arange(n_rise) + _rise_wf = amplitude*np.sin(np.pi*_x_rise/(2*t_rise*sampling_rate))**2 + # Middle part of waveform + _x_mid = np.arange(n_total-2*n_rise) + _mid_wf = amplitude*np.ones(len(_x_mid)) + # Concatenate waveforms together + _x = np.arange(n_total) + _wf_I = np.concatenate([_rise_wf, _mid_wf, _rise_wf[::-1]]) + _wf_Q = np.zeros(n_total) + # _wf_mod = _wf*np.sin(2*np.pi*frequency*_x/sampling_rate) + wf_I_mod, wf_Q_mod = mod_pulse(_wf_I, _wf_Q, f_modulation, + sampling_rate=sampling_rate) + return wf_I_mod, wf_Q_mod diff --git a/pycqed/measurement/waveform_control_CC/waveforms_vcz.py b/pycqed/measurement/waveform_control_CC/waveforms_vcz.py index 2786b57620..7d234545c5 100644 --- a/pycqed/measurement/waveform_control_CC/waveforms_vcz.py +++ b/pycqed/measurement/waveform_control_CC/waveforms_vcz.py @@ -3,10 +3,11 @@ Purpose: generate flux waveforms for VCZ gates and phase corrections; toolbox for vcz waveforms """ - +from dataclasses import dataclass import numpy as np import math import logging +from typing import Dict from qcodes.instrument.parameter import ManualParameter from qcodes.utils import validators as vals @@ -16,7 +17,9 @@ def add_vcz_parameters(this_flux_lm, which_gate: str = None): """ Adds to `this_flux_lm` the necessary parameters used for the VCZ - flux waveform including corrections + flux waveform including corrections. + Extends the VCZ parameters with additional (pre)parking parameters during the cz flux-pulse + in order to avoid specific frequency collision cases. """ this_flux_lm.add_parameter( "vcz_amp_dac_at_11_02_%s" % which_gate, @@ -24,7 +27,7 @@ def add_vcz_parameters(this_flux_lm, which_gate: str = None): "interaction point. 
NB: the units might be different for some " "other AWG that is distinct from the HDAWG.", parameter_class=ManualParameter, - vals=vals.Numbers(0.0, 10.0), + vals=vals.Numbers(-10.0, 10.0), initial_value=0.5, unit="a.u.", label="DAC amp. at the interaction point", @@ -125,6 +128,14 @@ def add_vcz_parameters(this_flux_lm, which_gate: str = None): unit="s", label="Time before correction", ) + this_flux_lm.add_parameter( + "vcz_use_net_zero_pulse_%s" % which_gate, + docstring="Flag to turn on the net-zero character of the SNZ pulse", + parameter_class=ManualParameter, + vals=vals.Bool(), + initial_value=True, + label="Use net-zero pulse amplitudes", + ) this_flux_lm.add_parameter( "vcz_use_asymmetric_amp_%s" % which_gate, docstring="Flag to turn on asymmetric amplitudes of the SNZ pulse", @@ -134,53 +145,63 @@ def add_vcz_parameters(this_flux_lm, which_gate: str = None): label="Use asymmetric SNZ pulse amplitudes", ) this_flux_lm.add_parameter( - "vcz_amp_pos_%s" % which_gate, - docstring="Amplitude of positive part of SNZ pulse, " + "vcz_asymmetry_%s" % which_gate, + docstring="Asymmetry of SNZ pulse, " "used only if vcz_use_asymmetric_amp is true.", parameter_class=ManualParameter, - vals=vals.Numbers(0.0, 10.0), - initial_value=1.0, + vals=vals.Numbers(-1.0, 1.0), + initial_value=0.0, unit="a.u.", - label="Positive SNZ amplitude, if asymmetric is used.", + label="Asymmetry of SNZ pulse, if asymmetric is used.", ) this_flux_lm.add_parameter( - "vcz_amp_neg_%s" % which_gate, - docstring="Amplitude of negative part of SNZ pulse, " - "used only if vcz_use_asymmetric_amp is true.", + "vcz_amp_pad_%s" % which_gate, + docstring="Amplitude padded part of SNZ pulse", parameter_class=ManualParameter, - vals=vals.Numbers(0.0, 10.0), - initial_value=1.0, + vals=vals.Numbers(-1.0, 1.0), + initial_value=0.0, unit="a.u.", - label="Negative SNZ amplitude, if asymmetric is used.", + label="Amplitude padded part of SNZ pulse.", + ) + this_flux_lm.add_parameter( + 
"vcz_amp_pad_samples_%s" % which_gate, + docstring="Nr of padded samples part of SNZ pulse", + parameter_class=ManualParameter, + vals=vals.Numbers(0, 200), + initial_value=12, + unit="nr of samples", + label="Nr of padded samples part of SNZ pulse.", + ) + # Parameters used to define pre-parking during NZ gate + this_flux_lm.add_parameter( + f"vcz_amp_prepark_{which_gate}", + docstring="Amplitude of the start and end of the NZ pulse (keeping t_p unchanged)," + "pre-parking the qubit before starting the intended interaction." + "1.0 means same amplitude as `sq_amp_XX`.", + parameter_class=ManualParameter, + vals=vals.Numbers(0.0, 1.0), + initial_value=0.0, + unit="a.u.", + label="Pre-park tuning amp.", + ) + this_flux_lm.add_parameter( + f"vcz_time_prepark_{which_gate}", + docstring="Duration of the pre-park block before and after NZ pulse. " + "Should be set such that collision avoidance is the satisfied at the start and end of the NZ pulse.", + parameter_class=ManualParameter, + vals=vals.Numbers(0, 500e-9), + initial_value=0, + unit="s", + label="Duration pre-park square", + ) + this_flux_lm.add_parameter( + f"vcz_use_prepark_{which_gate}", + docstring="", + parameter_class=ManualParameter, + vals=vals.Bool(), + initial_value=False, + label="Add extra points with amplitude `vcz_amp_prepark_XX`?", ) - - for specificity in ["coarse", "fine"]: - this_flux_lm.add_parameter( - "vcz_{}_optimal_hull_{}".format(specificity, which_gate), - initial_value=np.array([]), - label="{} hull".format(specificity), - docstring=( - "Stores the boundary points of a optimal region 2D region " - "generated from a landscape. 
Intended for data points " - "(x, y) = (`vcz_amp_sq_XX`, `vcz_time_middle_XX`)" - ), - parameter_class=ManualParameter, - vals=vals.Arrays(), - ) - this_flux_lm.add_parameter( - "vcz_{}_cond_phase_contour_{}".format(specificity, which_gate), - initial_value=np.array([]), - label="{} contour".format(specificity), - docstring=( - "Stores the points for an optimal conditional phase " - "contour generated from a landscape. Intended for data points " - "(x, y) = (`vcz_amp_sq_XX`, `vcz_time_middle_XX`) " - "typically for the 180 deg cond. phase." - ), - parameter_class=ManualParameter, - vals=vals.Arrays(), - ) - def align_vcz_q_phase_corr_with( @@ -264,7 +285,7 @@ def vcz_waveform( which_gate: str = None, sim_ctrl_cz=None, return_dict=False -): + ): amp_at_sweetspot = 0.0 if which_gate is None and sim_ctrl_cz is not None: which_gate = sim_ctrl_cz.which_gate() @@ -279,6 +300,11 @@ def vcz_waveform( # we might need to use asymmetric pulse amplitudes for the NZ pulse # if the qubit is operated off-sweetspot and interaction points are at different distances use_asymmetric_NZ = fluxlutman.get("vcz_use_asymmetric_amp_{}".format(which_gate)) + # if one wants to use unipolar pulses instead + use_net_zero_pulse = fluxlutman.get("vcz_use_net_zero_pulse_{}".format(which_gate)) + # In case we might want to perform a pre-parking during the NZ pulse + # thereby avoiding unwanted frequency collisions with neighboring gates. 
+ use_prepark: bool = fluxlutman.get(f"vcz_use_prepark_{which_gate}") # single qubit phase correction parameters correct_q_phase = fluxlutman.get("vcz_correct_q_phase_{}".format(which_gate)) @@ -293,13 +319,18 @@ def vcz_waveform( time_middle = fluxlutman.get("vcz_time_middle_{}".format(which_gate)) time_middle = time_middle * sampling_rate # avoid numerical issues + time_prepark: float = fluxlutman.get(f"vcz_time_prepark_{which_gate}") + time_prepark: float = time_prepark * sampling_rate # avoid numerical issues + # padding time at each side of the pulse, to fill to the cycle length time_pad = fluxlutman.get("vcz_time_pad_{}".format(which_gate)) time_pad = time_pad * sampling_rate + n_pad_samples = fluxlutman.get("vcz_amp_pad_samples_{}".format(which_gate)) # normalized to the amplitude at the CZ interaction point norm_amp_sq = fluxlutman.get("vcz_amp_sq_{}".format(which_gate)) norm_amp_fine = fluxlutman.get("vcz_amp_fine_{}".format(which_gate)) + norm_amp_prepark: float = fluxlutman.get(f"vcz_amp_prepark_{which_gate}") # This is to avoid numerical issues when the user would run sweeps with # e.g. `time_at_swtspt = np.arange(0/2.4e9, 10/ 2.4e9, 2/2.4e9)` @@ -309,55 +340,76 @@ def vcz_waveform( time_sqr = np.round(time_sqr / dt) * dt half_time_q_ph_corr = np.round(time_q_ph_corr / 2 / dt) * dt time_pad = np.round(time_pad / dt) * dt - - pad_amps = np.full(int(time_pad / dt), 0) + time_prepark = np.round(time_prepark / dt) * dt + + # Added padding amplitude by Jorge 22/08/2023 + pad_amp = fluxlutman.get("vcz_amp_pad_{}".format(which_gate)) + # Only add padding if amplitude is > 0 + if abs(amp_at_int_11_02) > 1e-3: + pad_amps = np.full(int(time_pad / dt), 0) + pad_amp/amp_at_int_11_02 + for _i in range(len(pad_amps)): + if _i < n_pad_samples: + pad_amps[_i] = 0 + # If not, just add zero padding. 
+ else: + pad_amps = np.full(int(time_pad / dt), 0) sq_amps = np.full(int(time_sqr / dt), norm_amp_sq) amps_middle = np.full(int(time_middle / dt), amp_at_sweetspot) if use_asymmetric_NZ: # build asymmetric SNZ amplitudes - norm_amp_pos = fluxlutman.get("vcz_amp_pos_{}".format(which_gate)) - norm_amp_neg = fluxlutman.get("vcz_amp_neg_{}".format(which_gate)) + # norm_amp_pos = fluxlutman.get("vcz_amp_pos_{}".format(which_gate)) + # norm_amp_neg = fluxlutman.get("vcz_amp_neg_{}".format(which_gate)) + norm_amp_pos = 1+fluxlutman.get("vcz_asymmetry_{}".format(which_gate)) + norm_amp_neg = 1-fluxlutman.get("vcz_asymmetry_{}".format(which_gate)) pos_sq_amps = np.full(int(time_sqr / dt), norm_amp_pos) neg_sq_amps = np.full(int(time_sqr / dt), norm_amp_neg) + slope_amp_pos = slope_amp_neg = np.array([]) if use_amp_fine: # slope amp will be using the same scaling factor as in the symmetric case, # but relative to pos and neg amplitudes # such that this amp is in the range [0, 1] slope_amp_pos = np.array([norm_amp_fine * norm_amp_pos]) slope_amp_neg = np.array([norm_amp_fine * norm_amp_neg]) - else: # sdfsdfsd - slope_amp_pos = slope_amp_neg = np.array([]) - pos_NZ_amps = np.concatenate((pos_sq_amps, slope_amp_pos)) - neg_NZ_amps = np.concatenate((slope_amp_neg, neg_sq_amps)) + prepark_amp_pos: np.ndarray = np.array([]) + prepark_amp_neg: np.ndarray = np.array([]) + if use_prepark: + prepark_amp_pos = np.array([norm_amp_prepark * norm_amp_pos] * int(time_prepark / dt)) + prepark_amp_neg = np.array([norm_amp_prepark * norm_amp_neg] * int(time_prepark / dt)) + + pos_NZ_amps = np.concatenate((prepark_amp_pos, pos_sq_amps, slope_amp_pos)) + neg_NZ_amps = np.concatenate((slope_amp_neg, neg_sq_amps, prepark_amp_neg)) amp = np.concatenate( ([amp_at_sweetspot], pad_amps, pos_NZ_amps, amps_middle, - -neg_NZ_amps, - pad_amps, + (1-use_net_zero_pulse*2)*neg_NZ_amps, + pad_amps[::-1], [amp_at_sweetspot]) ) else: + slope_amp = np.array([]) if use_amp_fine: # such that this amp is in the 
range [0, 1] slope_amp = np.array([norm_amp_fine * norm_amp_sq]) - else: - slope_amp = np.array([]) - half_NZ_amps = np.concatenate((sq_amps, slope_amp)) + prepark_amps: np.ndarray = np.array([]) + if use_prepark: + prepark_amps = np.array([norm_amp_prepark * norm_amp_sq] * int(time_prepark / dt)) + + half_NZ_amps = np.concatenate((prepark_amps, sq_amps, slope_amp)) amp = np.concatenate( ([amp_at_sweetspot], pad_amps, half_NZ_amps, amps_middle, - -half_NZ_amps[::-1], - pad_amps, + (1-use_net_zero_pulse*2)*half_NZ_amps[::-1], + pad_amps[::-1], [amp_at_sweetspot]) ) @@ -417,6 +469,21 @@ def vcz_waveform( return amp + +def scz_waveform( + fluxlutman, + which_gate: str = None, + sim_ctrl_cz=None, + return_dict=False + ): + return vcz_waveform( + fluxlutman=fluxlutman, + which_gate=which_gate, + sim_ctrl_cz=sim_ctrl_cz, + return_dict=return_dict, + ) + + # ###################################################################### # Auxiliary tools # ###################################################################### diff --git a/pycqed/qce_utils/__init__.py b/pycqed/qce_utils/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pycqed/qce_utils/analysis_factory/__init__.py b/pycqed/qce_utils/analysis_factory/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pycqed/qce_utils/analysis_factory/factory_transmon_arc_identifier.py b/pycqed/qce_utils/analysis_factory/factory_transmon_arc_identifier.py new file mode 100644 index 0000000000..1d98abfcb6 --- /dev/null +++ b/pycqed/qce_utils/analysis_factory/factory_transmon_arc_identifier.py @@ -0,0 +1,246 @@ +# ------------------------------------------- +# Factory module for constructing transmon-flux-arc identifier analysis. 
+# ------------------------------------------- +from abc import ABC, abstractmethod +from dataclasses import dataclass, field +from typing import List, Tuple +import warnings +import numpy as np +import matplotlib.transforms as transforms +from scipy.optimize import minimize +from pycqed.qce_utils.custom_exceptions import InterfaceMethodException +from pycqed.qce_utils.analysis_factory.intrf_analysis_factory import IFactoryManager, FigureDetails +from pycqed.qce_utils.analysis_factory.plotting_functionality import ( + construct_subplot, + SubplotKeywordEnum, + LabelFormat, + AxesFormat, + IFigureAxesPair, +) + + +@dataclass(frozen=True) +class Vec2D: + """ + Data class, containing x- and y-coordinate vector. + """ + x: float + y: float + + # region Class Methods + def to_vector(self) -> np.ndarray: + return np.asarray([self.x, self.y]) + + def to_tuple(self) -> Tuple[float, float]: + return self.x, self.y + + @classmethod + def from_vector(cls, vector: np.ndarray) -> 'Vec2D': + return Vec2D( + x=vector[0], + y=vector[1], + ) + + def __add__(self, other): + if isinstance(other, Vec2D): + return Vec2D(x=self.x + other.x, y=self.y + other.y) + raise NotImplemented(f"Addition with anything other than {Vec2D} is not implemented.") + # endregion + + +class IFluxArcIdentifier(ABC): + """ + Interface class, describing properties and get-methods for flux-arc identifier. + """ + + @property + @abstractmethod + def polynomial(self) -> np.polyfit: + """:return: Internally fitted polynomial.""" + raise InterfaceMethodException + + @property + @abstractmethod + def origin(self) -> Vec2D: + """:return: (Flux) arc origin x-y 2D vector.""" + raise InterfaceMethodException + + @abstractmethod + def get_amplitudes_at(self, detuning: float) -> np.ndarray: + """ + Filters only real roots. + :param detuning: detuning (y-value) at which to find the corresponding amplitude roots (x-values). + :return: Amplitudes (x-values) corresponding to desired detuning (y-values). 
+ """ + roots: np.ndarray = (self.polynomial - detuning).roots + return roots[np.isclose(roots.imag, 0)].real + + +@dataclass(frozen=True) +class FluxArcIdentifier(IFluxArcIdentifier): + """ + Data class, containing (AC) flux pulse amplitude vs (Ramsey) frequency detuning. + """ + _amplitude_array: np.ndarray = field(init=True) + _detuning_array: np.ndarray = field(init=True) + _polynomial: np.polyfit = field(init=False) + + # region Class Properties + @property + def amplitudes(self) -> np.ndarray: + return self._amplitude_array + + @property + def detunings(self) -> np.ndarray: + return self._detuning_array + + @property + def polynomial(self) -> np.polyfit: + """:return: Internally fitted polynomial.""" + return self._polynomial + + @property + def origin(self) -> Vec2D: + """:return: (Flux) arc origin x-y 2D vector.""" + _polynomial = self.polynomial + result = minimize(_polynomial, x0=0) + return Vec2D( + x=result.x[0], + y=result.fun, + ) + + # endregion + + # region Class Methods + def __post_init__(self): + object.__setattr__(self, '_polynomial', self._construct_poly_fit( + x=self.amplitudes, + y=self.detunings, + )) + + def get_amplitudes_at(self, detuning: float) -> np.ndarray: + """ + Filters only real roots. + :param detuning: detuning (y-value) at which to find the corresponding amplitude roots (x-values). + :return: Amplitudes (x-values) corresponding to desired detuning (y-values). + """ + roots: np.ndarray = (self.polynomial - detuning).roots + real_roots: np.ndarray = roots[np.isclose(roots.imag, 0)].real + if len(real_roots) == 0: + warnings.warn(**PolynomialRootNotFoundWarning.warning_format(detuning)) + return real_roots + # endregion + + # region Static Class Methods + @staticmethod + def _construct_poly_fit(x: np.ndarray, y: np.ndarray) -> np.poly1d: + """:return: Custom polynomial a*x^4 + b*x^3 + c*x^2 + d*x + 0.""" + # Construct the design matrix including x^4, x^3, x^2, and x^1. 
+ x_stack = np.column_stack((x ** 4, x ** 3, x ** 2, x)) + # Perform the linear least squares fitting + coefficients, residuals, rank, s = np.linalg.lstsq(x_stack, y, rcond=None) + # coefficients are the coefficients for x^4, x^3, x^2, and x^1 term respectively + a, b, c, d = coefficients + return np.poly1d([a, b, c, d, 0]) + # endregion + + +class FluxArcIdentifierAnalysis(IFactoryManager[FluxArcIdentifier]): + + # region Class Methods + def analyse(self, response: FluxArcIdentifier) -> List[FigureDetails]: + """ + Constructs one or multiple (matplotlib) figures from characterization response. + :param response: Characterization response used to construct analysis figures. + :return: Array-like of analysis figures. + """ + fig, ax = self.plot_flux_arc_identifier( + identifier=response, + ) + + return [ + FigureDetails(figure_object=fig, identifier="voltage_to_detuning"), + ] + + # endregion + + # region Static Class Methods + @staticmethod + def format_coefficient(coef): + """Format coefficient into scientific notation with LaTeX exponent format.""" + return f"{coef:.2e}".replace('+0', '^{').replace('-0', '-') + '}' + + @staticmethod + def plot_flux_arc_identifier(identifier: FluxArcIdentifier, **kwargs) -> IFigureAxesPair: + """ + :param identifier: + :param kwargs: + :return: + """ + # Data allocation + nyquist_frequency: float = 1.3e9 # Based on AWG sampling rate of 2.4GHz + roots: np.ndarray = identifier.get_amplitudes_at(detuning=nyquist_frequency) + min_root: float = float(np.min(np.abs(roots))) + high_resolution_amplitudes: np.ndarray = np.linspace(-min_root, min_root, 101) + # Evaluate the fitted polynomial + fitted_polynomial = identifier.polynomial + y_fit = fitted_polynomial(high_resolution_amplitudes) + origin: Vec2D = identifier.origin + + kwargs[SubplotKeywordEnum.LABEL_FORMAT.value] = kwargs.get(SubplotKeywordEnum.LABEL_FORMAT.value, LabelFormat( + x_label='Output voltage [V]', + y_label='Detuning [Hz]', + )) + fig, ax = construct_subplot(**kwargs) 
+ ax.plot( + identifier.amplitudes, + identifier.detunings, + linestyle='none', + marker='o', + ) + ax.plot( + high_resolution_amplitudes, + y_fit, + linestyle='--', + marker='none', + color='k', + ) + ax.axhline(origin.y, linestyle='--', color='lightgrey', zorder=-1) + ax.axvline(origin.x, linestyle='--', color='lightgrey', zorder=-1) + + # Display the polynomial equation in the plot + a, b, c, d, _ = fitted_polynomial.coeffs + formatter = FluxArcIdentifierAnalysis.format_coefficient + equation_text = f"$y = {formatter(a)}x^4 + {formatter(b)}x^3 + {formatter(c)}x^2 + {formatter(d)}x$" + ax.text(0.5, 0.95, equation_text, transform=ax.transAxes, ha='center', va='top') + + ylim = ax.get_ylim() + # Draw horizontal line to indicate asymmetry + desired_detuning: float = 500e6 + roots: np.ndarray = identifier.get_amplitudes_at(detuning=desired_detuning) + if roots.size > 0: + negative_root = float(roots[roots <= 0]) + negative_arc_x = negative_root + negative_arc_y = fitted_polynomial(negative_arc_x) + positive_arc_x = -negative_arc_x + positive_arc_y = fitted_polynomial(positive_arc_x) + # Draw comparison lines + color: str = 'green' + ax.hlines(y=negative_arc_y, xmin=min(high_resolution_amplitudes), xmax=origin.x, linestyle='--', color=color, zorder=-1) + ax.hlines(y=positive_arc_y, xmin=origin.x, xmax=positive_arc_x, linestyle='--', color=color, zorder=-1) + ax.vlines(x=negative_arc_x, ymin=ylim[0], ymax=negative_arc_y, linestyle='--', color=color, zorder=-1) + # Draw annotations + ax.annotate('', xy=(origin.x, positive_arc_y), xytext=(origin.x, negative_arc_y), arrowprops=dict(arrowstyle="<->", color=color)) + delta: float = abs(positive_arc_y - negative_arc_y) + arrow_y_position: float = min(positive_arc_y, negative_arc_y) + delta / 2 + text_y_position: float = max(positive_arc_y * 1.05, arrow_y_position) + ax.text(origin.x, text_y_position, f' $\Delta={delta * 1e-6:.2f}$ MHz', ha='left', va='center') + ax.text(negative_arc_x, negative_arc_y, f' {desired_detuning * 
1e-6:.0f} MHz at {negative_arc_x:.2f} V', ha='left', va='bottom') + # Draw origin offset + transform = transforms.blended_transform_factory(ax.transAxes, ax.transData) + ax.text(0.98, origin.y, f'{origin.y * 1e-6:.3f} MHz', ha='right', va='bottom', transform=transform) + + ax.set_xlim(left=min(high_resolution_amplitudes), right=max(high_resolution_amplitudes)) + ax.set_ylim(ylim) + return fig, ax + # endregion diff --git a/pycqed/qce_utils/analysis_factory/intrf_analysis_factory.py b/pycqed/qce_utils/analysis_factory/intrf_analysis_factory.py new file mode 100644 index 0000000000..309d94f17c --- /dev/null +++ b/pycqed/qce_utils/analysis_factory/intrf_analysis_factory.py @@ -0,0 +1,42 @@ +# ------------------------------------------- +# Module containing interface for analysis factory components. +# ------------------------------------------- +from abc import ABC, abstractmethod, ABCMeta +from dataclasses import dataclass, field +from typing import TypeVar, Dict, Type, List, Generic, Union +import logging +from enum import Enum, unique +import matplotlib.pyplot as plt +from pycqed.qce_utils.custom_exceptions import ( + InterfaceMethodException, +) + + +# Set up basic configuration for logging +logging.basicConfig(level=logging.WARNING, format='%(levelname)s:%(message)s') + + +T = TypeVar('T', bound=Type) + + +@dataclass(frozen=True) +class FigureDetails: + figure_object: plt.Figure + identifier: str + + +class IFactoryManager(ABC, Generic[T], metaclass=ABCMeta): + """ + Interface class, describing methods for manager factories. + """ + + # region Class Methods + @abstractmethod + def analyse(self, response: T) -> List[FigureDetails]: + """ + Constructs one or multiple (matplotlib) figures from characterization response. + :param response: Characterization response used to construct analysis figures. + :return: Array-like of analysis figures. 
+ """ + raise InterfaceMethodException + # endregion diff --git a/pycqed/qce_utils/analysis_factory/plotting_functionality.py b/pycqed/qce_utils/analysis_factory/plotting_functionality.py new file mode 100644 index 0000000000..5be8178b19 --- /dev/null +++ b/pycqed/qce_utils/analysis_factory/plotting_functionality.py @@ -0,0 +1,280 @@ +# ------------------------------------------- +# General plotting functionality. +# ------------------------------------------- +from abc import abstractmethod +from collections.abc import Iterable as ABCIterable +from typing import Callable, Tuple, Optional, Iterable, List, Union +import matplotlib.pyplot as plt +import numpy as np +from enum import Enum +from pycqed.qce_utils.custom_exceptions import InterfaceMethodException + +IFigureAxesPair = Tuple[plt.Figure, plt.Axes] +KEYWORD_LABEL_FORMAT = 'label_format' +KEYWORD_AXES_FORMAT = 'axes_format' +KEYWORD_HOST_AXES = 'host_axes' + + +class IAxesFormat: + """ + Interface for applying formatting changes to axis. + """ + # region Interface Methods + @abstractmethod + def apply_to_axes(self, axes: plt.Axes) -> plt.Axes: + """ + Applies axes formatting settings to axis. + :param axes: Axes to be formatted. + :return: Updated Axes. + """ + raise InterfaceMethodException + # endregion + + # region Static Class Methods + @staticmethod + @abstractmethod + def default() -> 'IAxesFormat': + """:return: Default formatting instance.""" + raise InterfaceMethodException + # endregion + + +class LabelFormat(IAxesFormat): + """ + Specifies callable formatting functions for both vector components. 
+ """ + IFormatCall = Callable[[float], str] + _default_format: IFormatCall = lambda x: f'{round(x)}' + _default_label: str = 'Default Label [a.u.]' + _default_symbol: str = 'X' + + # region Class Properties + @property + def x_label(self) -> str: + """:return: Unit label for x-vector component.""" + return self._x_label + + @property + def y_label(self) -> str: + """:return: Unit label for y-vector component.""" + return self._y_label + + @property + def z_label(self) -> str: + """:return: Unit label for z-vector component.""" + return self._z_label + + @property + def x_format(self) -> IFormatCall: + """:return: Formatting function of x-vector component.""" + return self._x_format + + @property + def y_format(self) -> IFormatCall: + """:return: Formatting function of y-vector component.""" + return self._y_format + + @property + def z_format(self) -> IFormatCall: + """:return: Formatting function of z-vector component.""" + return self._z_format + + @property + def x_symbol(self) -> str: + """:return: Unit symbol for x-vector component.""" + return self._x_symbol + + @property + def y_symbol(self) -> str: + """:return: Unit symbol for y-vector component.""" + return self._y_symbol + + @property + def z_symbol(self) -> str: + """:return: Unit symbol for z-vector component.""" + return self._z_symbol + # endregion + + # region Class Constructor + def __init__( + self, + x_label: str = _default_label, + y_label: str = _default_label, + z_label: str = _default_label, + x_format: IFormatCall = _default_format, + y_format: IFormatCall = _default_format, + z_format: IFormatCall = _default_format, + x_symbol: str = _default_symbol, + y_symbol: str = _default_symbol, + z_symbol: str = _default_symbol + ): + self._x_label: str = x_label + self._y_label: str = y_label + self._z_label: str = z_label + self._x_format: LabelFormat.IFormatCall = x_format + self._y_format: LabelFormat.IFormatCall = y_format + self._z_format: LabelFormat.IFormatCall = z_format + self._x_symbol: 
str = x_symbol + self._y_symbol: str = y_symbol + self._z_symbol: str = z_symbol + # endregion + + # region Interface Methods + def apply_to_axes(self, axes: plt.Axes) -> plt.Axes: + """ + Applies label formatting settings to axis. + :param axes: Axes to be formatted. + :return: Updated Axes. + """ + axes.set_xlabel(self.x_label) + axes.set_ylabel(self.y_label) + if hasattr(axes, 'set_zlabel'): + axes.set_zlabel(self.z_label) + return axes + # endregion + + # region Static Class Methods + @staticmethod + def default() -> 'LabelFormat': + """:return: Default LabelFormat instance.""" + return LabelFormat() + # endregion + + +class AxesFormat(IAxesFormat): + """ + Specifies general axis formatting functions. + """ + + # region Interface Methods + def apply_to_axes(self, axes: plt.Axes) -> plt.Axes: + """ + Applies axes formatting settings to axis. + :param axes: Axes to be formatted. + :return: Updated Axes. + """ + axes.grid(True, alpha=0.5, linestyle='dashed') # Adds dashed gridlines + axes.set_axisbelow(True) # Puts grid on background + return axes + # endregion + + # region Static Class Methods + @staticmethod + def default() -> 'AxesFormat': + """:return: Default AxesFormat instance.""" + return AxesFormat() + # endregion + + +class EmptyAxesFormat(AxesFormat): + """ + Overwrites AxesFormat with 'null' functionality. + Basically leaving the axes unchanged. + """ + + # region Interface Methods + def apply_to_axes(self, axes: plt.Axes) -> plt.Axes: + """ + Applies axes formatting settings to axis. + :param axes: Axes to be formatted. + :return: Updated Axes. + """ + return axes + # endregion + + +class SubplotKeywordEnum(Enum): + """ + Constructs specific enumerator for construct_subplot() method accepted keyword arguments. 
+ """ + LABEL_FORMAT = 'label_format' + AXES_FORMAT = 'axes_format' + HOST_AXES = 'host_axes' + PROJECTION = 'projection' + FIGURE_SIZE = 'figsize' + + +# TODO: Extend (or add) functionality to construct mosaic plots +def construct_subplot(*args, **kwargs) -> IFigureAxesPair: + """ + Extends plt.subplots() by optionally working from host_axes + and applying label- and axes formatting. + :param args: Positional arguments that are passed to plt.subplots() method. + :param kwargs: Key-word arguments that are passed to plt.subplots() method. + :keyword label_format: (Optional) Formatting settings for figure labels. + :keyword axes_format: (Optional) Formatting settings for figure axes. + :keyword host_axes: (Optional) figure-axes pair to which to write the plot instead. + If not supplied, create a new figure-axes pair. + :return: Tuple of plotted figure and axis. + """ + # Kwarg retrieval + label_format: IAxesFormat = kwargs.pop(SubplotKeywordEnum.LABEL_FORMAT.value, LabelFormat.default()) + axes_format: IAxesFormat = kwargs.pop(SubplotKeywordEnum.AXES_FORMAT.value, AxesFormat.default()) + host_axes: Tuple[plt.Figure, plt.Axes] = kwargs.pop(SubplotKeywordEnum.HOST_AXES.value, None) + projection: Optional[str] = kwargs.pop(SubplotKeywordEnum.PROJECTION.value, None) + + # Figure and axis + if host_axes is not None: + fig, ax0 = host_axes + else: + fig, ax0 = plt.subplots(*args, **kwargs) + + # region Dress Axes + axes: Iterable[plt.Axes] = [ax0] if not isinstance(ax0, ABCIterable) else ax0 + for _ax in axes: + _ax = label_format.apply_to_axes(axes=_ax) + _ax = axes_format.apply_to_axes(axes=_ax) + # endregion + + return fig, ax0 + + +def draw_object_summary(host: IFigureAxesPair, params: object, apply_tight_layout: bool = True) -> IFigureAxesPair: + """ + Adds text window with fit summary based on model parameter. + :param host: Tuple of figure and axis. + :param params: Any object (or model parameter container class) that implements .__str__() method. 
+ :param apply_tight_layout: (Optional) Boolean, whether a tight layout call should be applied to figure. + :return: Tuple of plotted figure and axis. + """ + + def linebreaks_to_columns(source: List[str], column: int, column_spacing: int) -> str: + """ + Attempts to insert tab spacing between source elements to create the visual illusion of columns. + :param source: Array-like of string elements to be placed in column-like structure. + :param column: Integer number of (maximum) columns. + :param column_spacing: Integer column spacing in character units. + :return: Single string with tabs to create column-like behaviour. + """ + # Data allocation + source_count: int = len(source) + desired_count: int = -(source_count // -column) * column # 'Upside down' floor division. + pad_count: int = desired_count - source_count + padded_source: List[str] = source + [''] * pad_count + slice_idx: List[Tuple[int, int]] = [(i * column, (i + 1) * column) for i in range(desired_count // column)] + result: str = '' + for i, (lb, ub) in enumerate(slice_idx): + row_elems = padded_source[lb: ub] + linebreak: str = '' if i == len(slice_idx) - 1 else '\t\n' # Only linebreak if there is another line coming + result += ('\t'.join(row_elems) + linebreak).expandtabs(tabsize=column_spacing) + return result + + fig, ax0 = host + text_str: str = params.__str__() + fontsize: int = 10 + ax0.text( + x=1.05, + y=0.99, + s=text_str, + fontdict=dict(horizontalalignment='left'), + transform=ax0.transAxes, + fontsize=fontsize, + verticalalignment='top', + horizontalalignment='left', + bbox=dict(boxstyle='round', facecolor='#C5C5C5', alpha=0.5), + linespacing=1.6, + ) + if apply_tight_layout: + fig.tight_layout() + + return fig, ax0 diff --git a/pycqed/qce_utils/control_interfaces/__init__.py b/pycqed/qce_utils/control_interfaces/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pycqed/qce_utils/control_interfaces/connectivity_surface_code.py 
b/pycqed/qce_utils/control_interfaces/connectivity_surface_code.py new file mode 100644 index 0000000000..fecdd8beaa --- /dev/null +++ b/pycqed/qce_utils/control_interfaces/connectivity_surface_code.py @@ -0,0 +1,783 @@ +# ------------------------------------------- +# Module containing implementation of surface-code connectivity structure. +# ------------------------------------------- +from dataclasses import dataclass, field +import warnings +from typing import List, Union, Dict, Tuple +from enum import Enum, unique, auto +from pycqed.qce_utils.definitions import SingletonABCMeta +from pycqed.qce_utils.custom_exceptions import ElementNotIncludedException +from pycqed.qce_utils.control_interfaces.intrf_channel_identifier import ( + IChannelIdentifier, + IFeedlineID, + IQubitID, + IEdgeID, + FeedlineIDObj, + QubitIDObj, + EdgeIDObj, +) +from pycqed.qce_utils.control_interfaces.intrf_connectivity_surface_code import ( + ISurfaceCodeLayer, + IParityGroup, + ParityType, +) +from pycqed.qce_utils.control_interfaces.intrf_connectivity import ( + IDeviceLayer +) + + +@unique +class FrequencyGroup(Enum): + LOW = auto() + MID = auto() + HIGH = auto() + + +@dataclass(frozen=True) +class FrequencyGroupIdentifier: + """ + Data class, representing (qubit) frequency group identifier. 
+ """ + _id: FrequencyGroup + + # region Class Properties + @property + def id(self) -> FrequencyGroup: + """:return: Self identifier.""" + return self._id + # endregion + + # region Class Methods + def is_equal_to(self, other: 'FrequencyGroupIdentifier') -> bool: + """:return: Boolean, whether other frequency group identifier is equal self.""" + return self.id == other.id + + def is_higher_than(self, other: 'FrequencyGroupIdentifier') -> bool: + """:return: Boolean, whether other frequency group identifier is 'lower' than self.""" + # Guard clause, if frequency groups are equal, return False + if self.is_equal_to(other): + return False + if self.id == FrequencyGroup.MID and other.id == FrequencyGroup.LOW: + return True + if self.id == FrequencyGroup.HIGH: + return True + return False + + def is_lower_than(self, other: 'FrequencyGroupIdentifier') -> bool: + """:return: Boolean, whether other frequency group identifier is 'higher' than self.""" + # Guard clause, if frequency groups are equal, return False + if self.is_equal_to(other): + return False + if self.is_higher_than(other): + return False + return True + # endregion + + +@dataclass(frozen=True) +class DirectionalEdgeIDObj(EdgeIDObj, IEdgeID): + """ + Data class, implementing IEdgeID interface. + Overwrites __hash__ and __eq__ to make qubit-to-qubit direction relevant. + """ + + # region Class Methods + def __hash__(self): + """ + Sorts individual qubit hashes such that the order is NOT maintained. + Making hash comparison independent of order. 
+ """ + return hash((self.qubit_id0.__hash__(), self.qubit_id1.__hash__())) + + def __eq__(self, other): + if isinstance(other, DirectionalEdgeIDObj): + # Edge is equal if they share the same qubit identifiers, order does not matter + return other.__hash__() == self.__hash__() + if isinstance(other, EdgeIDObj): + warnings.warn(message=f"Comparing directional edge to non-directional edge returns False by default.") + return False + return False + # endregion + + +@dataclass(frozen=True) +class ParityGroup(IParityGroup): + """ + Data class, implementing IParityGroup interface. + """ + _parity_type: ParityType = field(init=True) + """X or Z type stabilizer.""" + _ancilla_qubit: IQubitID = field(init=True) + """Ancilla qubit.""" + _data_qubits: List[IQubitID] = field(init=True) + """Data qubits.""" + _edges: List[IEdgeID] = field(init=False) + """Edges between ancilla and data qubits.""" + + # region Interface Properties + @property + def parity_type(self) -> ParityType: + """:return: Parity type (X or Z type stabilizer).""" + return self._parity_type + + @property + def ancilla_id(self) -> IQubitID: + """:return: (Main) ancilla-qubit-ID from parity.""" + return self._ancilla_qubit + + @property + def data_ids(self) -> List[IQubitID]: + """:return: (All) data-qubit-ID's from parity.""" + return self._data_qubits + + @property + def edge_ids(self) -> List[IEdgeID]: + """:return: (All) edge-ID's between ancilla and data qubit-ID's.""" + return self._edges + # endregion + + # region Interface Methods + def contains(self, element: Union[IQubitID, IEdgeID]) -> bool: + """:return: Boolean, whether element is part of parity group or not.""" + if element in self.data_ids: + return True + if element in self.edge_ids: + return True + if element == self.ancilla_id: + return True + return False + # endregion + + # region Class Methods + def __post_init__(self): + edges: List[IEdgeID] = [ + EdgeIDObj( + qubit_id0=self.ancilla_id, + qubit_id1=data_qubit_id, + ) + for data_qubit_id 
in self.data_ids + ] + object.__setattr__(self, '_edges', edges) + # endregion + + +@dataclass(frozen=True) +class FluxDanceLayer: + """ + Data class, containing directional gates played during 'flux-dance' layer. + """ + _edge_ids: List[IEdgeID] + """Non-directional edges, part of flux-dance layer.""" + + # region Class Properties + @property + def qubit_ids(self) -> List[IQubitID]: + """:return: All qubit-ID's.""" + return list(set([qubit_id for edge in self.edge_ids for qubit_id in edge.qubit_ids])) + + @property + def edge_ids(self) -> List[IEdgeID]: + """:return: Array-like of directional edge identifiers, specific for this flux dance.""" + return self._edge_ids + # endregion + + # region Class Methods + def contains(self, element: Union[IQubitID, IEdgeID]) -> bool: + """:return: Boolean, whether element is part of flux-dance layer or not.""" + if element in self.qubit_ids: + return True + if element in self.edge_ids: + return True + return False + + def get_involved_edge(self, qubit_id: IQubitID) -> IEdgeID: + """:return: Edge in which qubit-ID is involved. 
If qubit-ID not part of self, raise error.""" + for edge in self.edge_ids: + if edge.contains(element=qubit_id): + return edge + raise ElementNotIncludedException(f'Element {qubit_id} is not part of self ({self}) and cannot be part of an edge.') + + def get_spectating_qubit_ids(self, device_layer: IDeviceLayer) -> List[IQubitID]: + """:return: Direct spectator (nearest neighbor) to qubit-ID's participating in flux-dance.""" + participating_qubit_ids: List[IQubitID] = self.qubit_ids + nearest_neighbor_ids: List[IQubitID] = [neighbor_id for qubit_id in participating_qubit_ids for neighbor_id in device_layer.get_neighbors(qubit_id, order=1)] + filtered_nearest_neighbor_ids: List[IQubitID] = list(set([qubit_id for qubit_id in nearest_neighbor_ids if qubit_id not in participating_qubit_ids])) + return filtered_nearest_neighbor_ids + + def requires_parking(self, qubit_id: IQubitID, device_layer: ISurfaceCodeLayer) -> bool: + """ + Determines whether qubit-ID is required to park based on participation in flux dance and frequency group. + :return: Boolean, whether qubit-ID requires some form of parking. + """ + spectating_qubit_ids: List[IQubitID] = self.get_spectating_qubit_ids(device_layer=device_layer) + # Guard clause, if qubit-ID does not spectate the flux-dance, no need for parking + if qubit_id not in spectating_qubit_ids: + return False + # Check if qubit-ID requires parking based on its frequency group ID and active two-qubit gates. + frequency_group: FrequencyGroupIdentifier = device_layer.get_frequency_group_identifier(element=qubit_id) + # Parking is required if any neighboring qubit from a higher frequency group is part of an edge. 
+ neighboring_qubit_ids: List[IQubitID] = device_layer.get_neighbors(qubit=qubit_id, order=1) + involved_neighbors: List[IQubitID] = [qubit_id for qubit_id in neighboring_qubit_ids if self.contains(qubit_id)] + involved_frequency_groups: List[FrequencyGroupIdentifier] = [device_layer.get_frequency_group_identifier(element=qubit_id) for qubit_id in involved_neighbors] + return any([neighbor_frequency_group.is_higher_than(frequency_group) for neighbor_frequency_group in involved_frequency_groups]) + # endregion + + + +@dataclass(frozen=True) +class VirtualPhaseIdentifier(IChannelIdentifier): + """ + Data class, describing (code-word) identifier for virtual phase. + """ + _id: str + + # region Interface Properties + @property + def id(self) -> str: + """:returns: Reference Identifier.""" + return self._id + # endregion + + # region Interface Methods + def __hash__(self): + """:returns: Identifiable hash.""" + return self._id.__hash__() + + def __eq__(self, other): + """:returns: Boolean if other shares equal identifier, else InterfaceMethodException.""" + if isinstance(other, VirtualPhaseIdentifier): + return self.id.__eq__(other.id) + return False + # endregion + + +@dataclass(frozen=True) +class FluxOperationIdentifier(IChannelIdentifier): + """ + Data class, describing (code-word) identifier for flux operation. 
+ """ + _id: str + + # region Interface Properties + @property + def id(self) -> str: + """:returns: Reference Identifier.""" + return self._id + # endregion + + # region Interface Methods + def __hash__(self): + """:returns: Identifiable hash.""" + return self._id.__hash__() + + def __eq__(self, other): + """:returns: Boolean if other shares equal identifier, else InterfaceMethodException.""" + if isinstance(other, FluxOperationIdentifier): + return self.id.__eq__(other.id) + return False + # endregion + + +class Surface17Layer(ISurfaceCodeLayer, metaclass=SingletonABCMeta): + """ + Singleton class, implementing ISurfaceCodeLayer interface to describe a surface-17 layout. + """ + _feedline_qubit_lookup: Dict[IFeedlineID, List[IQubitID]] = { + FeedlineIDObj('FL1'): [QubitIDObj('D9'), QubitIDObj('D8'), QubitIDObj('X4'), QubitIDObj('Z4'), QubitIDObj('Z2'), QubitIDObj('D6')], + FeedlineIDObj('FL2'): [QubitIDObj('D3'), QubitIDObj('D7'), QubitIDObj('D2'), QubitIDObj('X3'), QubitIDObj('Z1'), QubitIDObj('X2'), QubitIDObj('Z3'), QubitIDObj('D5'), QubitIDObj('D4')], + FeedlineIDObj('FL3'): [QubitIDObj('D1'), QubitIDObj('X1')], + } + _qubit_edges: List[IEdgeID] = [ + EdgeIDObj(QubitIDObj('D1'), QubitIDObj('Z1')), + EdgeIDObj(QubitIDObj('D1'), QubitIDObj('X1')), + EdgeIDObj(QubitIDObj('D2'), QubitIDObj('X1')), + EdgeIDObj(QubitIDObj('D2'), QubitIDObj('Z1')), + EdgeIDObj(QubitIDObj('D2'), QubitIDObj('X2')), + EdgeIDObj(QubitIDObj('D3'), QubitIDObj('X2')), + EdgeIDObj(QubitIDObj('D3'), QubitIDObj('Z2')), + EdgeIDObj(QubitIDObj('D4'), QubitIDObj('Z3')), + EdgeIDObj(QubitIDObj('D4'), QubitIDObj('X3')), + EdgeIDObj(QubitIDObj('D4'), QubitIDObj('Z1')), + EdgeIDObj(QubitIDObj('D5'), QubitIDObj('Z1')), + EdgeIDObj(QubitIDObj('D5'), QubitIDObj('X3')), + EdgeIDObj(QubitIDObj('D5'), QubitIDObj('Z4')), + EdgeIDObj(QubitIDObj('D5'), QubitIDObj('X2')), + EdgeIDObj(QubitIDObj('D6'), QubitIDObj('X2')), + EdgeIDObj(QubitIDObj('D6'), QubitIDObj('Z4')), + EdgeIDObj(QubitIDObj('D6'), 
QubitIDObj('Z2')), + EdgeIDObj(QubitIDObj('D7'), QubitIDObj('Z3')), + EdgeIDObj(QubitIDObj('D7'), QubitIDObj('X3')), + EdgeIDObj(QubitIDObj('D8'), QubitIDObj('X3')), + EdgeIDObj(QubitIDObj('D8'), QubitIDObj('X4')), + EdgeIDObj(QubitIDObj('D8'), QubitIDObj('Z4')), + EdgeIDObj(QubitIDObj('D9'), QubitIDObj('Z4')), + EdgeIDObj(QubitIDObj('D9'), QubitIDObj('X4')), + ] + _parity_group_x: List[IParityGroup] = [ + ParityGroup( + _parity_type=ParityType.STABILIZER_X, + _ancilla_qubit=QubitIDObj('X1'), + _data_qubits=[QubitIDObj('D1'), QubitIDObj('D2')] + ), + ParityGroup( + _parity_type=ParityType.STABILIZER_X, + _ancilla_qubit=QubitIDObj('X2'), + _data_qubits=[QubitIDObj('D2'), QubitIDObj('D3'), QubitIDObj('D5'), QubitIDObj('D6')] + ), + ParityGroup( + _parity_type=ParityType.STABILIZER_X, + _ancilla_qubit=QubitIDObj('X3'), + _data_qubits=[QubitIDObj('D4'), QubitIDObj('D5'), QubitIDObj('D7'), QubitIDObj('D8')] + ), + ParityGroup( + _parity_type=ParityType.STABILIZER_X, + _ancilla_qubit=QubitIDObj('X4'), + _data_qubits=[QubitIDObj('D8'), QubitIDObj('D9')] + ), + ] + _parity_group_z: List[IParityGroup] = [ + ParityGroup( + _parity_type=ParityType.STABILIZER_Z, + _ancilla_qubit=QubitIDObj('Z1'), + _data_qubits=[QubitIDObj('D1'), QubitIDObj('D2'), QubitIDObj('D4'), QubitIDObj('D5')] + ), + ParityGroup( + _parity_type=ParityType.STABILIZER_Z, + _ancilla_qubit=QubitIDObj('Z2'), + _data_qubits=[QubitIDObj('D3'), QubitIDObj('D6')] + ), + ParityGroup( + _parity_type=ParityType.STABILIZER_Z, + _ancilla_qubit=QubitIDObj('Z3'), + _data_qubits=[QubitIDObj('D4'), QubitIDObj('D7')] + ), + ParityGroup( + _parity_type=ParityType.STABILIZER_Z, + _ancilla_qubit=QubitIDObj('Z4'), + _data_qubits=[QubitIDObj('D5'), QubitIDObj('D6'), QubitIDObj('D8'), QubitIDObj('D9')] + ), + ] + _frequency_group_lookup: Dict[IQubitID, FrequencyGroupIdentifier] = { + QubitIDObj('D1'): FrequencyGroupIdentifier(_id=FrequencyGroup.LOW), + QubitIDObj('D2'): FrequencyGroupIdentifier(_id=FrequencyGroup.LOW), + 
QubitIDObj('D3'): FrequencyGroupIdentifier(_id=FrequencyGroup.LOW), + QubitIDObj('D4'): FrequencyGroupIdentifier(_id=FrequencyGroup.HIGH), + QubitIDObj('D5'): FrequencyGroupIdentifier(_id=FrequencyGroup.HIGH), + QubitIDObj('D6'): FrequencyGroupIdentifier(_id=FrequencyGroup.HIGH), + QubitIDObj('D7'): FrequencyGroupIdentifier(_id=FrequencyGroup.LOW), + QubitIDObj('D8'): FrequencyGroupIdentifier(_id=FrequencyGroup.LOW), + QubitIDObj('D9'): FrequencyGroupIdentifier(_id=FrequencyGroup.LOW), + QubitIDObj('Z1'): FrequencyGroupIdentifier(_id=FrequencyGroup.MID), + QubitIDObj('Z2'): FrequencyGroupIdentifier(_id=FrequencyGroup.MID), + QubitIDObj('Z3'): FrequencyGroupIdentifier(_id=FrequencyGroup.MID), + QubitIDObj('Z4'): FrequencyGroupIdentifier(_id=FrequencyGroup.MID), + QubitIDObj('X1'): FrequencyGroupIdentifier(_id=FrequencyGroup.MID), + QubitIDObj('X2'): FrequencyGroupIdentifier(_id=FrequencyGroup.MID), + QubitIDObj('X3'): FrequencyGroupIdentifier(_id=FrequencyGroup.MID), + QubitIDObj('X4'): FrequencyGroupIdentifier(_id=FrequencyGroup.MID), + } + + # region ISurfaceCodeLayer Interface Properties + @property + def parity_group_x(self) -> List[IParityGroup]: + """:return: (All) parity groups part of X-stabilizers.""" + return self._parity_group_x + + @property + def parity_group_z(self) -> List[IParityGroup]: + """:return: (All) parity groups part of Z-stabilizers.""" + return self._parity_group_z + # endregion + + # region Class Properties + @property + def feedline_ids(self) -> List[IFeedlineID]: + """:return: All feedline-ID's.""" + return list(self._feedline_qubit_lookup.keys()) + + @property + def qubit_ids(self) -> List[IQubitID]: + """:return: All qubit-ID's.""" + return [qubit_id for qubit_ids in self._feedline_qubit_lookup.values() for qubit_id in qubit_ids] + + @property + def edge_ids(self) -> List[IEdgeID]: + """:return: All edge-ID's.""" + return self._qubit_edges + # endregion + + # region ISurfaceCodeLayer Interface Methods + def get_parity_group(self, 
element: Union[IQubitID, IEdgeID]) -> IParityGroup: + """:return: Parity group of which element (edge- or qubit-ID) is part of.""" + # Assumes element is part of only a single parity group + for parity_group in self.parity_group_x + self.parity_group_z: + if parity_group.contains(element=element): + return parity_group + raise ElementNotIncludedException(f"Element: {element} is not included in any parity group.") + # endregion + + # region IDeviceLayer Interface Methods + def get_connected_qubits(self, feedline: IFeedlineID) -> List[IQubitID]: + """:return: Qubit-ID's connected to feedline-ID.""" + # Guard clause, if feedline not in lookup, raise exception + if feedline not in self._feedline_qubit_lookup: + raise ElementNotIncludedException(f"Element: {feedline} is not included in any feedline group.") + return self._feedline_qubit_lookup[feedline] + + def get_neighbors(self, qubit: IQubitID, order: int = 1) -> List[IQubitID]: + """ + Requires :param order: to be higher or equal to 1. + :return: qubit neighbors separated by order. (order=1, nearest neighbors). + """ + if order > 1: + raise NotImplementedError("Apologies, so far there has not been a use for. 
But feel free to implement.") + edges: List[IEdgeID] = self.get_edges(qubit=qubit) + result: List[IQubitID] = [] + for edge in edges: + result.append(edge.get_connected_qubit_id(element=qubit)) + return result + + def get_edges(self, qubit: IQubitID) -> List[IEdgeID]: + """:return: All qubit-to-qubit edges from qubit-ID.""" + result: List[IEdgeID] = [] + for edge in self.edge_ids: + if edge.contains(element=qubit): + result.append(edge) + return result + + def contains(self, element: Union[IFeedlineID, IQubitID, IEdgeID]) -> bool: + """:return: Boolean, whether element is part of device layer or not.""" + if element in self.feedline_ids: + return True + if element in self.qubit_ids: + return True + if element in self.edge_ids: + return True + return False + + def get_frequency_group_identifier(self, element: IQubitID) -> FrequencyGroupIdentifier: + """:return: Frequency group identifier based on qubit-ID.""" + return self._frequency_group_lookup[element] + # endregion + + +class Repetition9Layer(ISurfaceCodeLayer, metaclass=SingletonABCMeta): + """ + Singleton class, implementing ISurfaceCodeLayer interface to describe a repetition-9 layout. 
+ """ + _parity_group_x: List[IParityGroup] = [ + ParityGroup( + _parity_type=ParityType.STABILIZER_X, + _ancilla_qubit=QubitIDObj('X1'), + _data_qubits=[QubitIDObj('D2'), QubitIDObj('D1')] + ), + ParityGroup( + _parity_type=ParityType.STABILIZER_X, + _ancilla_qubit=QubitIDObj('X2'), + _data_qubits=[QubitIDObj('D2'), QubitIDObj('D3')] + ), + ParityGroup( + _parity_type=ParityType.STABILIZER_X, + _ancilla_qubit=QubitIDObj('X3'), + _data_qubits=[QubitIDObj('D8'), QubitIDObj('D7')] + ), + ParityGroup( + _parity_type=ParityType.STABILIZER_X, + _ancilla_qubit=QubitIDObj('X4'), + _data_qubits=[QubitIDObj('D9'), QubitIDObj('D8')] + ), + ] + _parity_group_z: List[IParityGroup] = [ + ParityGroup( + _parity_type=ParityType.STABILIZER_Z, + _ancilla_qubit=QubitIDObj('Z1'), + _data_qubits=[QubitIDObj('D4'), QubitIDObj('D5')] + ), + ParityGroup( + _parity_type=ParityType.STABILIZER_Z, + _ancilla_qubit=QubitIDObj('Z2'), + _data_qubits=[QubitIDObj('D6'), QubitIDObj('D3')] + ), + ParityGroup( + _parity_type=ParityType.STABILIZER_Z, + _ancilla_qubit=QubitIDObj('Z3'), + _data_qubits=[QubitIDObj('D7'), QubitIDObj('D4')] + ), + ParityGroup( + _parity_type=ParityType.STABILIZER_Z, + _ancilla_qubit=QubitIDObj('Z4'), + _data_qubits=[QubitIDObj('D5'), QubitIDObj('D6')] + ), + ] + _virtual_phase_lookup: Dict[DirectionalEdgeIDObj, VirtualPhaseIdentifier] = { + DirectionalEdgeIDObj(QubitIDObj('D1'), QubitIDObj('Z1')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_NE'), + DirectionalEdgeIDObj(QubitIDObj('Z1'), QubitIDObj('D1')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_SW'), + DirectionalEdgeIDObj(QubitIDObj('D1'), QubitIDObj('X1')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_SE'), + DirectionalEdgeIDObj(QubitIDObj('X1'), QubitIDObj('D1')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_NW'), + DirectionalEdgeIDObj(QubitIDObj('D2'), QubitIDObj('X1')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_SW'), + DirectionalEdgeIDObj(QubitIDObj('X1'), QubitIDObj('D2')): 
VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_NE'), + DirectionalEdgeIDObj(QubitIDObj('D2'), QubitIDObj('Z1')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_NW'), + DirectionalEdgeIDObj(QubitIDObj('Z1'), QubitIDObj('D2')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_SE'), + DirectionalEdgeIDObj(QubitIDObj('D2'), QubitIDObj('X2')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_NE'), + DirectionalEdgeIDObj(QubitIDObj('X2'), QubitIDObj('D2')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_SW'), + DirectionalEdgeIDObj(QubitIDObj('D3'), QubitIDObj('X2')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_NW'), + DirectionalEdgeIDObj(QubitIDObj('X2'), QubitIDObj('D3')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_SE'), + DirectionalEdgeIDObj(QubitIDObj('D3'), QubitIDObj('Z2')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_NE'), + DirectionalEdgeIDObj(QubitIDObj('Z2'), QubitIDObj('D3')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_SW'), + DirectionalEdgeIDObj(QubitIDObj('D4'), QubitIDObj('Z3')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_NW'), + DirectionalEdgeIDObj(QubitIDObj('Z3'), QubitIDObj('D4')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_SE'), + DirectionalEdgeIDObj(QubitIDObj('D4'), QubitIDObj('X3')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_NE'), + DirectionalEdgeIDObj(QubitIDObj('X3'), QubitIDObj('D4')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_SW'), + DirectionalEdgeIDObj(QubitIDObj('D4'), QubitIDObj('Z1')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_SE'), + DirectionalEdgeIDObj(QubitIDObj('Z1'), QubitIDObj('D4')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_NW'), + DirectionalEdgeIDObj(QubitIDObj('D5'), QubitIDObj('Z1')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_SW'), + DirectionalEdgeIDObj(QubitIDObj('Z1'), QubitIDObj('D5')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_NE'), + DirectionalEdgeIDObj(QubitIDObj('D5'), QubitIDObj('X3')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_NW'), + DirectionalEdgeIDObj(QubitIDObj('X3'), 
QubitIDObj('D5')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_SE'), + DirectionalEdgeIDObj(QubitIDObj('D5'), QubitIDObj('Z4')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_NE'), + DirectionalEdgeIDObj(QubitIDObj('Z4'), QubitIDObj('D5')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_SW'), + DirectionalEdgeIDObj(QubitIDObj('D5'), QubitIDObj('X2')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_SE'), + DirectionalEdgeIDObj(QubitIDObj('X2'), QubitIDObj('D5')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_NW'), + DirectionalEdgeIDObj(QubitIDObj('D6'), QubitIDObj('X2')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_SW'), + DirectionalEdgeIDObj(QubitIDObj('X2'), QubitIDObj('D6')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_NE'), + DirectionalEdgeIDObj(QubitIDObj('D6'), QubitIDObj('Z4')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_NW'), + DirectionalEdgeIDObj(QubitIDObj('Z4'), QubitIDObj('D6')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_SE'), + DirectionalEdgeIDObj(QubitIDObj('D6'), QubitIDObj('Z2')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_SE'), + DirectionalEdgeIDObj(QubitIDObj('Z2'), QubitIDObj('D6')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_NW'), + DirectionalEdgeIDObj(QubitIDObj('D7'), QubitIDObj('Z3')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_SW'), + DirectionalEdgeIDObj(QubitIDObj('Z3'), QubitIDObj('D7')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_NE'), + DirectionalEdgeIDObj(QubitIDObj('D7'), QubitIDObj('X3')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_SE'), + DirectionalEdgeIDObj(QubitIDObj('X3'), QubitIDObj('D7')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_NW'), + DirectionalEdgeIDObj(QubitIDObj('D8'), QubitIDObj('X3')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_SW'), + DirectionalEdgeIDObj(QubitIDObj('X3'), QubitIDObj('D8')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_NE'), + DirectionalEdgeIDObj(QubitIDObj('D8'), QubitIDObj('X4')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_NE'), + 
DirectionalEdgeIDObj(QubitIDObj('X4'), QubitIDObj('D8')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_SW'), + DirectionalEdgeIDObj(QubitIDObj('D8'), QubitIDObj('Z4')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_SE'), + DirectionalEdgeIDObj(QubitIDObj('Z4'), QubitIDObj('D8')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_NW'), + DirectionalEdgeIDObj(QubitIDObj('D9'), QubitIDObj('Z4')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_SW'), + DirectionalEdgeIDObj(QubitIDObj('Z4'), QubitIDObj('D9')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_NE'), + DirectionalEdgeIDObj(QubitIDObj('D9'), QubitIDObj('X4')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_NW'), + DirectionalEdgeIDObj(QubitIDObj('X4'), QubitIDObj('D9')): VirtualPhaseIdentifier('vcz_virtual_q_ph_corr_SE'), + } + _flux_dances: List[Tuple[FluxDanceLayer, FluxOperationIdentifier]] = [ + ( + FluxDanceLayer( + _edge_ids=[ + EdgeIDObj(QubitIDObj('X1'), QubitIDObj('D1')), + EdgeIDObj(QubitIDObj('Z1'), QubitIDObj('D4')), + EdgeIDObj(QubitIDObj('X3'), QubitIDObj('D7')), + EdgeIDObj(QubitIDObj('Z2'), QubitIDObj('D6')), + ] + ), + FluxOperationIdentifier(_id='repetition_code_1') + ), + ( + FluxDanceLayer( + _edge_ids=[ + EdgeIDObj(QubitIDObj('X1'), QubitIDObj('D2')), + EdgeIDObj(QubitIDObj('Z1'), QubitIDObj('D5')), + EdgeIDObj(QubitIDObj('X3'), QubitIDObj('D8')), + EdgeIDObj(QubitIDObj('Z2'), QubitIDObj('D3')), + ] + ), + FluxOperationIdentifier(_id='repetition_code_2') + ), + ( + FluxDanceLayer( + _edge_ids=[ + EdgeIDObj(QubitIDObj('Z3'), QubitIDObj('D7')), + EdgeIDObj(QubitIDObj('X4'), QubitIDObj('D8')), + EdgeIDObj(QubitIDObj('Z4'), QubitIDObj('D5')), + EdgeIDObj(QubitIDObj('X2'), QubitIDObj('D2')), + ] + ), + FluxOperationIdentifier(_id='repetition_code_3') + ), + ( + FluxDanceLayer( + _edge_ids=[ + EdgeIDObj(QubitIDObj('Z3'), QubitIDObj('D4')), + EdgeIDObj(QubitIDObj('X4'), QubitIDObj('D9')), + EdgeIDObj(QubitIDObj('Z4'), QubitIDObj('D6')), + EdgeIDObj(QubitIDObj('X2'), QubitIDObj('D3')), + ] + ), + 
FluxOperationIdentifier(_id='repetition_code_4') + ), + ] + + # region ISurfaceCodeLayer Interface Properties + @property + def parity_group_x(self) -> List[IParityGroup]: + """:return: (All) parity groups part of X-stabilizers.""" + return self._parity_group_x + + @property + def parity_group_z(self) -> List[IParityGroup]: + """:return: (All) parity groups part of Z-stabilizers.""" + return self._parity_group_z + # endregion + + # region Class Properties + @property + def feedline_ids(self) -> List[IFeedlineID]: + """:return: All feedline-ID's.""" + return Surface17Layer().feedline_ids + + @property + def qubit_ids(self) -> List[IQubitID]: + """:return: All qubit-ID's.""" + return Surface17Layer().qubit_ids + + @property + def edge_ids(self) -> List[IEdgeID]: + """:return: All edge-ID's.""" + return Surface17Layer().edge_ids + # endregion + + # region ISurfaceCodeLayer Interface Methods + def get_parity_group(self, element: Union[IQubitID, IEdgeID]) -> IParityGroup: + """:return: Parity group of which element (edge- or qubit-ID) is part of.""" + # Assumes element is part of only a single parity group + for parity_group in self.parity_group_x + self.parity_group_z: + if parity_group.contains(element=element): + return parity_group + raise ElementNotIncludedException(f"Element: {element} is not included in any parity group.") + # endregion + + # region IGateDanceLayer Interface Methods + def get_flux_dance_at_round(self, index: int) -> FluxDanceLayer: + """:return: Flux-dance object based on round index.""" + try: + flux_dance_layer: FluxDanceLayer = self._flux_dances[index] + return flux_dance_layer + except: + raise ElementNotIncludedException(f"Index: {index} is out of bounds for flux dance of length: {len(self._flux_dances)}.") + # endregion + + # region IDeviceLayer Interface Methods + def get_connected_qubits(self, feedline: IFeedlineID) -> List[IQubitID]: + """:return: Qubit-ID's connected to feedline-ID.""" + return 
Surface17Layer().get_connected_qubits(feedline=feedline) + + def get_neighbors(self, qubit: IQubitID, order: int = 1) -> List[IQubitID]: + """ + Requires :param order: to be higher or equal to 1. + :return: qubit neighbors separated by order. (order=1, nearest neighbors). + """ + return Surface17Layer().get_neighbors(qubit=qubit, order=order) + + def get_edges(self, qubit: IQubitID) -> List[IEdgeID]: + """:return: All qubit-to-qubit edges from qubit-ID.""" + return Surface17Layer().get_edges(qubit=qubit) + + def contains(self, element: Union[IFeedlineID, IQubitID, IEdgeID]) -> bool: + """:return: Boolean, whether element is part of device layer or not.""" + return Surface17Layer().contains(element=element) + # endregion + + # region Class Methods + def _get_flux_dance_layer(self, element: IEdgeID) -> FluxDanceLayer: + """:return: Flux-dance layer of which edge element is part of.""" + # Assumes element is part of only a single flux-dance layer + for flux_dance_layer, _ in self._flux_dances: + if flux_dance_layer.contains(element=element): + return flux_dance_layer + raise ElementNotIncludedException(f"Element: {element} is not included in any flux-dance layer.") + + def _get_flux_operation_identifier(self, element: IEdgeID) -> FluxOperationIdentifier: + """:return: Identifier describing flux-dance layer.""" + for flux_dance_layer, flux_operation_identifier in self._flux_dances: + if flux_dance_layer.contains(element=element): + return flux_operation_identifier + raise ElementNotIncludedException(f"Element: {element} is not included in any flux-dance layer.") + + + def get_flux_operation_identifier(self, qubit_id0: str, qubit_id1: str) -> str: + """:return: Identifier describing flux-dance layer.""" + edge: IEdgeID = EdgeIDObj( + qubit_id0=QubitIDObj(_id=qubit_id0), + qubit_id1=QubitIDObj(_id=qubit_id1), + ) + return self._get_flux_operation_identifier(element=edge).id + + def get_edge_flux_operation_identifier(self, ancilla_qubit: str) -> List[str]: + """:return: 
Identifier describing flux-dance layer.""" + qubit_id: IQubitID = QubitIDObj(_id=ancilla_qubit) + parity_group: IParityGroup = self.get_parity_group(element=qubit_id) + return [ + self._get_flux_operation_identifier( + element=edge_id, + ).id + for edge_id in parity_group.edge_ids + ] + + def _get_virtual_phase_identifier(self, directional_edge: DirectionalEdgeIDObj) -> VirtualPhaseIdentifier: + """:return: Identifier for virtual phase correction. Based on element and parity group.""" + return self._virtual_phase_lookup[directional_edge] + + def get_virtual_phase_identifier(self, from_qubit: str, to_qubit: str) -> VirtualPhaseIdentifier: + """:return: Identifier for virtual phase correction. Based on element and parity group.""" + directional_edge: DirectionalEdgeIDObj = DirectionalEdgeIDObj( + qubit_id0=QubitIDObj(_id=from_qubit), + qubit_id1=QubitIDObj(_id=to_qubit), + ) + return self._get_virtual_phase_identifier(directional_edge=directional_edge) + + def get_ancilla_virtual_phase_identifier(self, ancilla_qubit: str) -> str: + """:return: Arbitrary virtual phase from ancilla used in parity group.""" + qubit_id: IQubitID = QubitIDObj(_id=ancilla_qubit) + parity_group: IParityGroup = self.get_parity_group(element=qubit_id) + directional_edge: DirectionalEdgeIDObj = DirectionalEdgeIDObj( + qubit_id0=parity_group.ancilla_id, + qubit_id1=parity_group.data_ids[0], + ) + return self._get_virtual_phase_identifier(directional_edge=directional_edge).id + + def get_data_virtual_phase_identifiers(self, ancilla_qubit: str) -> List[str]: + """:return: Arbitrary virtual phase from ancilla used in parity group.""" + qubit_id: IQubitID = QubitIDObj(_id=ancilla_qubit) + parity_group: IParityGroup = self.get_parity_group(element=qubit_id) + return [ + self._get_virtual_phase_identifier( + directional_edge=DirectionalEdgeIDObj( + qubit_id0=data_id, + qubit_id1=parity_group.ancilla_id, + ) + ).id + for data_id in parity_group.data_ids + ] + + def get_parity_data_identifier(self, 
ancilla_qubit: str) -> List[str]: + """ + Iterates over provided ancilla qubit ID's. + Construct corresponding IQubitID's. + Obtain corresponding IParityGroup's. + Flatten list of (unique) data qubit ID's part of these parity groups. + :return: Array-like of (unique) data qubit ID's part of ancilla qubit parity groups. + """ + ancilla_qubit_id: IQubitID = QubitIDObj(ancilla_qubit) + parity_group: IParityGroup = self.get_parity_group(element=ancilla_qubit_id) + data_qubit_ids: List[IQubitID] = [qubit_id for qubit_id in parity_group.data_ids] + return [qubit_id.id for qubit_id in data_qubit_ids] + + def get_parity_data_identifiers(self, ancilla_qubits: List[str]) -> List[str]: + """ + Iterates over provided ancilla qubit ID's. + Construct corresponding IQubitID's. + Obtain corresponding IParityGroup's. + Flatten list of (unique) data qubit ID's part of these parity groups. + :return: Array-like of (unique) data qubit ID's part of ancilla qubit parity groups. + """ + return [unique_qubit_id for ancilla_qubit in ancilla_qubits for unique_qubit_id in set(self.get_parity_data_identifier(ancilla_qubit=ancilla_qubit))] + + def get_frequency_group_identifier(self, element: IQubitID) -> FrequencyGroupIdentifier: + """:return: Frequency group identifier based on qubit-ID.""" + return Surface17Layer().get_frequency_group_identifier(element=element) + # endregion + + +if __name__ == '__main__': + + flux_dance_0 = Repetition9Layer().get_flux_dance_at_round(0) + print(flux_dance_0.edge_ids) diff --git a/pycqed/qce_utils/control_interfaces/intrf_channel_identifier.py b/pycqed/qce_utils/control_interfaces/intrf_channel_identifier.py new file mode 100644 index 0000000000..bdf6ffd944 --- /dev/null +++ b/pycqed/qce_utils/control_interfaces/intrf_channel_identifier.py @@ -0,0 +1,297 @@ +# ------------------------------------------- +# Interface for unique channel references +# For example: +# Qubit identifier, Feedline identifier, Flux channel identifier, etc. 
+# ------------------------------------------- +from abc import ABCMeta, abstractmethod, ABC +from dataclasses import dataclass, field +from typing import List, Dict +from pycqed.qce_utils.custom_exceptions import InterfaceMethodException, IsolatedGroupException + +QID = str # Might become int in future +QName = str + + +class IChannelIdentifier(ABC): + """ + Interface class, describing unique identifier. + """ + + # region Interface Properties + @property + @abstractmethod + def id(self) -> str: + """:returns: Reference Identifier.""" + raise InterfaceMethodException + # endregion + + # region Interface Methods + @abstractmethod + def __hash__(self): + """:returns: Identifiable hash.""" + raise InterfaceMethodException + + @abstractmethod + def __eq__(self, other): + """:returns: Boolean if other shares equal identifier, else InterfaceMethodException.""" + raise InterfaceMethodException + # endregion + + +class IQubitID(IChannelIdentifier, metaclass=ABCMeta): + """ + Interface for qubit reference. + """ + + # region Interface Properties + @property + @abstractmethod + def name(self) -> QName: + """:returns: Reference name for qubit.""" + raise InterfaceMethodException + # endregion + + +class IFeedlineID(IChannelIdentifier, metaclass=ABCMeta): + """ + Interface for feedline reference. + """ + pass + + +class IEdgeID(IChannelIdentifier, metaclass=ABCMeta): + """ + Interface class, for qubit-to-qubit edge reference. 
+ """ + + # region Interface Properties + @property + def qubit_ids(self) -> List[IQubitID]: + """:return: All qubit-ID's.""" + raise InterfaceMethodException + # endregion + + # region Interface Methods + @abstractmethod + def contains(self, element: IQubitID) -> bool: + """:return: Boolean, whether element is part of edge or not.""" + raise InterfaceMethodException + + @abstractmethod + def get_connected_qubit_id(self, element: IQubitID) -> IQubitID: + """:return: Qubit-ID, connected to the other side of this edge.""" + raise InterfaceMethodException + # endregion + + +class IQubitIDGroups(ABC): + """ + Interface class, describing groups of IQubitID's. + """ + + # region Interface Properties + @property + @abstractmethod + def groups(self) -> List[List[IQubitID]]: + """:return: Array-like of grouped (array) IQubitID's.""" + raise InterfaceMethodException + # endregion + + # region Interface Methods + @abstractmethod + def get_group(self, group_member: IQubitID) -> List[IQubitID]: + """ + Returns empty list if group_member not part of this lookup. + :return: Array-like of group members. Including provided group_member. + """ + raise InterfaceMethodException + # endregion + + +@dataclass(frozen=True) +class QubitIDObj(IQubitID): + """ + Contains qubit label ID. 
+ """ + _id: QName + + # region Interface Properties + @property + def id(self) -> QID: + """:returns: Reference ID for qubit.""" + return self._id + + @property + def name(self) -> QName: + """:returns: Reference name for qubit.""" + return self.id + # endregion + + # region Class Methods + def __hash__(self): + """:returns: Identifiable hash.""" + return self.id.__hash__() + + def __eq__(self, other): + """:returns: Boolean if other shares equal identifier, else InterfaceMethodException.""" + if isinstance(other, IQubitID): + return self.id.__eq__(other.id) + # raise NotImplementedError('QubitIDObj equality check to anything other than IQubitID interface is not implemented.') + return False + + def __repr__(self): + return f'{self.id}' + # endregion + + +@dataclass(frozen=True) +class QubitIDGroups(IQubitIDGroups): + """ + Data class, implementing IQubitIDGroups interface. + """ + group_lookup: Dict[IQubitID, int] = field(default_factory=dict) + """Lookup dictionary where each IQubitID is matched to a specific (integer) group identifier.""" + + # region Interface Properties + @property + def groups(self) -> List[List[IQubitID]]: + """:return: Array-like of grouped (array) IQubitID's.""" + return list(self.group_id_to_members.values()) + # endregion + + # region Class Properties + @property + def group_id_to_members(self) -> Dict[int, List[IQubitID]]: + """:return: Intermediate lookup table from group-id to its members.""" + group_lookup: Dict[int, List[IQubitID]] = {} + for qubit_id, group_id in self.group_lookup.items(): + if group_id not in group_lookup: + group_lookup[group_id] = [qubit_id] + else: + group_lookup[group_id].append(qubit_id) + return group_lookup + # endregion + + # region Interface Methods + def get_group(self, group_member: IQubitID) -> List[IQubitID]: + """ + Returns empty list if group_member not part of this lookup. + :return: Array-like of group members. Including provided group_member. 
+ """ + group_id_to_members: Dict[int, List[IQubitID]] = self.group_id_to_members + # Guard clause, if provided group member not in this lookup, return empty list. + if group_member not in self.group_lookup: + return [] + group_id: int = self.group_lookup[group_member] + return group_id_to_members[group_id] + # endregion + + # region Class Methods + def __post_init__(self): + # Verify group member uniqueness. + all_group_members: List[IQubitID] = [qubit_id for group in self.groups for qubit_id in group] + isolated_groups: bool = len(set(all_group_members)) == len(all_group_members) + if not isolated_groups: + raise IsolatedGroupException(f'Expects all group members to be part of a single group.') + + @classmethod + def from_groups(cls, groups: List[List[IQubitID]]) -> 'QubitIDGroups': + """:return: Class method constructor based on list of groups of QUbitID's.""" + group_lookup: Dict[IQubitID, int] = {} + for group_id, group in enumerate(groups): + for qubit_id in group: + if qubit_id in group_lookup: + raise IsolatedGroupException(f'{qubit_id} is already in another group. Requires each group member to be part of only one group.') + group_lookup[qubit_id] = group_id + return QubitIDGroups( + group_lookup=group_lookup, + ) + # endregion + + +@dataclass(frozen=True) +class FeedlineIDObj(IFeedlineID): + """ + Data class, implementing IFeedlineID interface. 
+ """ + name: QID + + # region Interface Properties + @property + def id(self) -> QID: + """:returns: Reference ID for feedline.""" + return self.name + # endregion + + # region Class Methods + def __hash__(self): + return self.id.__hash__() + + def __eq__(self, other): + if isinstance(other, IFeedlineID): + return self.id.__eq__(other.id) + # raise NotImplementedError('FeedlineIDObj equality check to anything other than IFeedlineID interface is not implemented.') + return False + + def __repr__(self): + return f'{self.id}' + # endregion + + +@dataclass(frozen=True) +class EdgeIDObj(IEdgeID): + """ + Data class, implementing IEdgeID interface. + """ + qubit_id0: IQubitID + """Arbitrary edge qubit-ID.""" + qubit_id1: IQubitID + """Arbitrary edge qubit-ID.""" + + # region Interface Properties + @property + def id(self) -> QID: + """:returns: Reference ID for edge.""" + return f"{self.qubit_id0.id}-{self.qubit_id1.id}" + + @property + def qubit_ids(self) -> List[IQubitID]: + """:return: All qubit-ID's.""" + return [self.qubit_id0, self.qubit_id1] + # endregion + + # region Interface Methods + def contains(self, element: IQubitID) -> bool: + """:return: Boolean, whether element is part of edge or not.""" + if element in [self.qubit_id0, self.qubit_id1]: + return True + return False + + def get_connected_qubit_id(self, element: IQubitID) -> IQubitID: + """:return: Qubit-ID, connected to the other side of this edge.""" + if element == self.qubit_id0: + return self.qubit_id1 + if element == self.qubit_id1: + return self.qubit_id0 + # If element is not part of this edge + raise ValueError(f"Element: {element} is not part of this edge: {self}") + # endregion + + # region Class Methods + def __hash__(self): + """ + Sorts individual qubit hashes such that the order is NOT maintained. + Making hash comparison independent of order. 
+ """ + return hash((min(self.qubit_id0.__hash__(), self.qubit_id1.__hash__()), max(self.qubit_id0.__hash__(), self.qubit_id1.__hash__()))) + + def __eq__(self, other): + if isinstance(other, IEdgeID): + # Edge is equal if they share the same qubit identifiers, order does not matter + return other.contains(self.qubit_id0) and other.contains(self.qubit_id1) + # raise NotImplementedError('EdgeIDObj equality check to anything other than IEdgeID interface is not implemented.') + return False + + def __repr__(self): + return f'{self.id}' + # endregion diff --git a/pycqed/qce_utils/control_interfaces/intrf_connectivity.py b/pycqed/qce_utils/control_interfaces/intrf_connectivity.py new file mode 100644 index 0000000000..8730ef06cf --- /dev/null +++ b/pycqed/qce_utils/control_interfaces/intrf_connectivity.py @@ -0,0 +1,145 @@ +# ------------------------------------------- +# Module containing interface for device connectivity structure. +# ------------------------------------------- +from abc import ABC, ABCMeta, abstractmethod +from multipledispatch import dispatch +from typing import List, Tuple, Union +from pycqed.qce_utils.custom_exceptions import InterfaceMethodException +from pycqed.qce_utils.control_interfaces.intrf_channel_identifier import ( + IFeedlineID, + IQubitID, + IEdgeID, +) + + +class IIdentifier(ABC): + """ + Interface class, describing equality identifier method. + """ + + # region Interface Methods + @abstractmethod + def __eq__(self, other): + """:return: Boolean, whether 'other' equals 'self'.""" + raise InterfaceMethodException + # endregion + + +class INode(IIdentifier, metaclass=ABCMeta): + """ + Interface class, describing the node in a connectivity layer. 
+ """ + + # region Interface Properties + @property + @abstractmethod + def edges(self) -> List['IEdge']: + """:return: (N) Edges connected to this node.""" + raise InterfaceMethodException + # endregion + + +class IEdge(IIdentifier, metaclass=ABCMeta): + """ + Interface class, describing a connection between two nodes. + """ + + # region Interface Properties + @property + @abstractmethod + def nodes(self) -> Tuple[INode, INode]: + """:return: (2) Nodes connected by this edge.""" + raise InterfaceMethodException + # endregion + + +class IConnectivityLayer(ABC): + """ + Interface class, describing a connectivity (graph) layer containing nodes and edges. + Note that a connectivity layer can include 'separated' graphs + where not all nodes have a connection path to all other nodes. + """ + + # region Interface Properties + @property + @abstractmethod + def nodes(self) -> List[INode]: + """:return: Array-like of nodes.""" + raise InterfaceMethodException + + @property + @abstractmethod + def edges(self) -> List[IEdge]: + """:return: Array-like of edges.""" + raise InterfaceMethodException + # endregion + + # region Interface Methods + @dispatch(node=INode) + @abstractmethod + def get_connected_nodes(self, node: INode, order: int) -> List[INode]: + """ + :param node: (Root) node to base connectivity on. + If node has no edges, return an empty list. + :param order: Connectivity range. + Order <=0: empty list, 1: first order connectivity, 2: second order connectivity, etc. + :return: Array-like of nodes connected to 'node' within order of connection (excluding 'node' itself). + """ + raise InterfaceMethodException + + @dispatch(edge=IEdge) + @abstractmethod + def get_connected_nodes(self, edge: IEdge, order: int) -> List[INode]: + """ + :param edge: (Root) edge to base connectivity on. + :param order: Connectivity range. + Order <=0: empty list, 1: first order connectivity, 2: second order connectivity, etc. 
+ :return: Array-like of nodes connected to 'edge' within order of connection. + """ + raise InterfaceMethodException + # endregion + + +class IConnectivityStack(ABC): + """ + Interface class, describing an array-like of connectivity layers. + """ + + # region Interface Properties + @property + @abstractmethod + def layers(self) -> List[IConnectivityLayer]: + """:return: Array-like of connectivity layers.""" + raise InterfaceMethodException + # endregion + + +class IDeviceLayer(ABC): + """ + Interface class, describing relation based connectivity. + """ + + # region Interface Methods + @abstractmethod + def get_connected_qubits(self, feedline: IFeedlineID) -> List[IQubitID]: + """:return: Qubit-ID's connected to feedline-ID.""" + raise InterfaceMethodException + + @abstractmethod + def get_neighbors(self, qubit: IQubitID, order: int = 1) -> List[IQubitID]: + """ + Requires :param order: to be higher or equal to 1. + :return: qubit neighbors separated by order. (order=1, nearest neighbors). + """ + raise InterfaceMethodException + + @abstractmethod + def get_edges(self, qubit: IQubitID) -> List[IEdgeID]: + """:return: All qubit-to-qubit edges from qubit-ID.""" + raise InterfaceMethodException + + @abstractmethod + def contains(self, element: Union[IFeedlineID, IQubitID, IEdgeID]) -> bool: + """:return: Boolean, whether element is part of device layer or not.""" + raise InterfaceMethodException + # endregion diff --git a/pycqed/qce_utils/control_interfaces/intrf_connectivity_surface_code.py b/pycqed/qce_utils/control_interfaces/intrf_connectivity_surface_code.py new file mode 100644 index 0000000000..e912546aa0 --- /dev/null +++ b/pycqed/qce_utils/control_interfaces/intrf_connectivity_surface_code.py @@ -0,0 +1,83 @@ +# ------------------------------------------- +# Module containing interface for surface-code connectivity structure. 
+# ------------------------------------------- +from abc import ABC, ABCMeta, abstractmethod +from typing import List, Union +from enum import Enum +from pycqed.qce_utils.custom_exceptions import InterfaceMethodException +from pycqed.qce_utils.control_interfaces.intrf_channel_identifier import ( + IQubitID, + IEdgeID, +) +from pycqed.qce_utils.control_interfaces.intrf_connectivity import IDeviceLayer + + +class ParityType(Enum): + STABILIZER_X = 0 + STABILIZER_Z = 1 + + +class IParityGroup(ABC): + """ + Interface class, describing qubit (nodes) and edges related to the parity group. + """ + + # region Interface Properties + @property + @abstractmethod + def parity_type(self) -> ParityType: + """:return: Parity type (X or Z type stabilizer).""" + raise InterfaceMethodException + + @property + @abstractmethod + def ancilla_id(self) -> IQubitID: + """:return: (Main) ancilla-qubit-ID from parity.""" + raise InterfaceMethodException + + @property + @abstractmethod + def data_ids(self) -> List[IQubitID]: + """:return: (All) data-qubit-ID's from parity.""" + raise InterfaceMethodException + + @property + @abstractmethod + def edge_ids(self) -> List[IEdgeID]: + """:return: (All) edge-ID's between ancilla and data qubit-ID's.""" + raise InterfaceMethodException + # endregion + + # region Interface Methods + @abstractmethod + def contains(self, element: Union[IQubitID, IEdgeID]) -> bool: + """:return: Boolean, whether element is part of parity group or not.""" + raise InterfaceMethodException + # endregion + + +class ISurfaceCodeLayer(IDeviceLayer, metaclass=ABCMeta): + """ + Interface class, describing surface-code relation based connectivity. 
+ """ + + # region Interface Properties + @property + @abstractmethod + def parity_group_x(self) -> List[IParityGroup]: + """:return: (All) parity groups part of X-stabilizers.""" + raise InterfaceMethodException + + @property + @abstractmethod + def parity_group_z(self) -> List[IParityGroup]: + """:return: (All) parity groups part of Z-stabilizers.""" + raise InterfaceMethodException + # endregion + + # region Interface Methods + @abstractmethod + def get_parity_group(self, element: Union[IQubitID, IEdgeID]) -> IParityGroup: + """:return: Parity group of which element (edge- or qubit-ID) is part of.""" + raise InterfaceMethodException + # endregion diff --git a/pycqed/qce_utils/custom_exceptions.py b/pycqed/qce_utils/custom_exceptions.py new file mode 100644 index 0000000000..ebc5d3c4e0 --- /dev/null +++ b/pycqed/qce_utils/custom_exceptions.py @@ -0,0 +1,245 @@ +# ------------------------------------------- +# Customized exceptions for better maintainability +# ------------------------------------------- +import numpy as np + + +class InterfaceMethodException(Exception): + """ + Raised when the interface method is not implemented. + """ + + +class WeakRefException(Exception): + """ + Raised when weak visa-instance reference is being retrieved which is not available. + """ + + +class ModelParameterException(Exception): + """ + Raised when model-parameter class is being constructed using an inconsistent amount of parameters. + """ + + +class ModelParameterSubClassException(Exception): + """ + Raised when model-parameter does not sub class the expected model-parameter class. + """ + + +class KeyboardFinish(KeyboardInterrupt): + """ + Indicates that the user safely aborts/interrupts terminal process. + """ + + +class IdentifierException(Exception): + """ + Raised when (qubit) identifier is not correctly handled. + """ + + +class InvalidProminenceThresholdException(Exception): + """ + Raised when dynamic prominence threshold for peak detection is inconclusive. 
+ """ + + +class EnumNotDefinedException(Exception): + """ + Raised when undefined enum is detected. + """ + + +class EvaluationException(Exception): + """ + Raised when optimizer parameters have not yet been evaluated. + """ + + +class OverloadSignatureNotDefinedException(Exception): + """ + Raised when overload signature for specific function is not defined or recognized. + Search-keys: overload, dispatch, multipledispatch, type casting. + """ + + +class ArrayShapeInconsistencyException(Exception): + """ + Raised when the shape of arrays are inconsistent or incompatible with each other. + """ + + # region Static Class Methods + @staticmethod + def format_arrays(x: np.ndarray, y: np.ndarray) -> 'ArrayShapeInconsistencyException': + return ArrayShapeInconsistencyException(f'Provided x-y arrays are do not have the same shape: {x.shape} != {y.shape}') + # endregion + + +class ArrayNotComplexException(Exception): + """ + Raised when not all array elements are complex. + """ + + +class StateEvaluationException(Exception): + """ + Raised when state vector evaluation (expression to real float) fails. + """ + + +class StateConditionEvaluationException(Exception): + """ + Raised when state vector condition evaluation fails. + """ + + +class WrapperException(Exception): + """ + Raised any form of exception is needed within wrapper implementation. + """ + + +class InvalidPointerException(Exception): + """ + Raised when file-pointer is invalid (path-to-file does not exist). + """ + + +class SerializationException(Exception): + """ + Raised when there is a problem serializing an object. + """ + + +class HDF5ItemTypeException(Exception): + """ + Raised when type from an item inside hdf5-file group is not recognized. + """ + + +class DataGenerationCompleteException(Exception): + """ + Raised when upper bound of data generation has been reached. + """ + + +class DataInconclusiveException(Exception): + """ + Raised when data is incomplete or inconclusive. 
+ """ + + +class LinspaceBoundaryException(Exception): + """ + Raised when the boundary values of a linear space sampler are identical. + """ + + +class TransmonFrequencyRangeException(Exception): + """ + Raised when frequency falls outside the range of Transmon frequency. + """ + + # region Static Class Methods + @staticmethod + def format_arrays(qubit_max_frequency: float, target_frequency: float) -> 'TransmonFrequencyRangeException': + return TransmonFrequencyRangeException(f'Target frequency value {target_frequency*1e-9:2.f} [GHz] not within qubit frequency range: 0-{qubit_max_frequency*1e-9:2.f} [GHz].') + # endregion + + +class DimensionalityException(Exception): + """ + Raised when dataset dimensionality is unknown or does not match expected. + """ + + +class FactoryRequirementNotSatisfiedException(Exception): + """ + Raised when factory deployment requirement is not satisfied. + """ + + +class NoSamplesToEvaluateException(Exception): + """ + Raised when functionality depending on non-zero number of samples fails. + """ + + # region Static Class Methods + @staticmethod + def format_for_model_driven_agent() -> 'NoSamplesToEvaluateException': + return NoSamplesToEvaluateException(f"Agent can not perform sample evaluation with 0 samples. Ensure to execute 'self.next(state: CoordinateResponsePair)' with at least a single state before requesting model evaluation.") + # endregion + + +class HardwareModuleChannelException(Exception): + """ + Raised when module channel index is out of range. + """ + + +class OperationTypeException(Exception): + """ + Raised when operation type does not correspond to expected type. + """ + + +class RegexGroupException(Exception): + """ + Raised when regex match does not find intended group. + """ + + +class IsolatedGroupException(Exception): + """ + Raised when a list of grouped elements are not isolated. Members from one group are shared in another group. 
+ """ + + +class PeakDetectionException(Exception): + """ + Raised when the number of detected peaks is not sufficient. + """ + + +class FactoryManagerKeyException(Exception): + """ + Raised when the key is not present in the factory-manager components. + """ + + # region Static Class Methods + @staticmethod + def format_log(key, dictionary) -> 'FactoryManagerKeyException': + return FactoryManagerKeyException(f'Provided key: {key} is not present in {dictionary}.') + # endregion + + +class RequestNotSupportedException(FactoryManagerKeyException): + """ + Raised when (measurement) execution request is not support or can not be handled. + """ + + +class IncompleteParameterizationException(Exception): + """ + Raised when operation is not completely parameterized. + """ + + +class ElementNotIncludedException(Exception): + """ + Raised when element (such as IQubitID, IEdgeID or IFeedlineID) is not included in the connectivity layer. + """ + + +class GenericTypeException(Exception): + """ + Raised when generic type is not found or supported. 
+ """ + + # region Static Class Methods + @staticmethod + def format_log(generic_type: type) -> 'GenericTypeException': + return GenericTypeException(f'Generic type : {generic_type} is not supported.') + # endregion diff --git a/pycqed/qce_utils/definitions.py b/pycqed/qce_utils/definitions.py new file mode 100644 index 0000000000..2fa4aa7d74 --- /dev/null +++ b/pycqed/qce_utils/definitions.py @@ -0,0 +1,25 @@ +# ------------------------------------------- +# Project root pointer +# ------------------------------------------- +import os +from abc import ABCMeta +from pathlib import Path +ROOT_DIR = Path(os.path.dirname(os.path.abspath(__file__))).parent.parent.absolute() +CONFIG_DIR = os.path.join(ROOT_DIR, 'data', 'class_configs') +UNITDATA_DIR = os.path.join(ROOT_DIR, 'data', 'unittest_data') +TEMP_DIR = os.path.join(ROOT_DIR, 'data', 'temp') +UI_STYLE_QSS = os.path.join(ROOT_DIR, 'style.qss') +FRAME_DIR = os.path.join(TEMP_DIR, 'frames') + + +class Singleton(type): + _instances = {} + + def __call__(cls, *args, **kwargs): + if cls not in cls._instances: + cls._instances[cls] = super().__call__(*args, **kwargs) + return cls._instances[cls] + + +class SingletonABCMeta(ABCMeta, Singleton): + pass diff --git a/pycqed/qce_utils/measurement_module/__init__.py b/pycqed/qce_utils/measurement_module/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pycqed/qce_utils/module_description.md b/pycqed/qce_utils/module_description.md new file mode 100644 index 0000000000..8b982ee330 --- /dev/null +++ b/pycqed/qce_utils/module_description.md @@ -0,0 +1,14 @@ +Purpose QCE-Utils +=== +This sub-module is a direct port from the standalone QCoExtended repository. +Only well established functionality from the standalone repository is transferred to PycQED. + +- Custom exceptions. Good practice to have a library of custom exceptions, these help identify which exceptions are raised in what situations. 
The most used one is 'InterfaceMethodException' which is raised if an (ABC) interface abstractmethod is not implemented. + +Control Interfaces +=== +Contains: +- Channel identifier interfaces. These are identifiers for individual qubits, edges and feedlines. +- Connectivity interfaces. These describe building blocks like nodes and edges, but also larger structures like connectivity layers and stacks (multiple layers). Together they combine in the Device layer interface, exposing get methods for relationships between nodes and edges. +- Surface-code specific connectivity interfaces. These extend the connectivity interfaces by exposing surface-code specific terminology like parity groups and (qubit) frequency groups. +- Surface-code connectivity. This implements the above-mentioned interfaces to create a so called 'Surface-17' connectivity layer. This can be used throughout to obtain qubit-to-qubit relations by simply referring to their corresponding identifiers. An example of its use is during multi-qubit experiments which use inter-dependent flux trajectories (like 'flux-dance cycles'). 
\ No newline at end of file diff --git a/pycqed/tests/test_connectivity.py b/pycqed/tests/test_connectivity.py new file mode 100644 index 0000000000..b7fcbacc46 --- /dev/null +++ b/pycqed/tests/test_connectivity.py @@ -0,0 +1,81 @@ +import unittest +from typing import List +from pycqed.qce_utils.control_interfaces.intrf_channel_identifier import ( + IQubitID, + IEdgeID, + QubitIDObj, + EdgeIDObj, +) +from pycqed.qce_utils.control_interfaces.intrf_connectivity_surface_code import ( + ISurfaceCodeLayer, + IParityGroup, +) +from pycqed.qce_utils.control_interfaces.connectivity_surface_code import Surface17Layer + + +class Surface17ConnectivityTestCase(unittest.TestCase): + + # region Setup + @classmethod + def setUpClass(cls) -> None: + """Set up for all test cases""" + cls.layer: Surface17Layer = Surface17Layer() + cls.expected_qubit_ids: List[IQubitID] = [ + QubitIDObj('D9'), QubitIDObj('D8'), QubitIDObj('X4'), QubitIDObj('Z4'), QubitIDObj('Z2'), QubitIDObj('D6'), + QubitIDObj('D3'), QubitIDObj('D7'), QubitIDObj('D2'), QubitIDObj('X3'), QubitIDObj('Z1'), QubitIDObj('X2'), + QubitIDObj('Z3'), QubitIDObj('D5'), QubitIDObj('D4'), QubitIDObj('D1'), QubitIDObj('X1'), + ] + + def setUp(self) -> None: + """Set up for every test case""" + pass + # endregion + + # region Test Cases + def test_qubit_inclusion(self): + """Tests if all 17 expected qubits are included in the connectivity layer.""" + for qubit_id in self.expected_qubit_ids: + with self.subTest(msg=f'{qubit_id.id}'): + self.assertTrue(self.layer.contains(element=qubit_id)) + + def test_qubit_edge_count(self): + """Tests 24 unique edges are present.""" + edges: List[IEdgeID] = self.layer.edge_ids + self.assertEquals( + len(set(edges)), + 24, + msg=f"Expect 24 unique edges in a Surface-17 layout. Got: {len(set(edges))}." 
+ ) + + def test_qubit_edge_getter(self): + """Tests various cases of obtaining qubit edges.""" + edges: List[IEdgeID] = self.layer.get_edges(qubit=QubitIDObj('D5')) + expected_edges: List[IEdgeID] = [ + EdgeIDObj(QubitIDObj('Z1'), QubitIDObj('D5')), EdgeIDObj(QubitIDObj('D5'), QubitIDObj('X3')), + EdgeIDObj(QubitIDObj('D5'), QubitIDObj('Z4')), EdgeIDObj(QubitIDObj('D5'), QubitIDObj('X2')), + ] + self.assertSetEqual( + set(edges), + set(expected_edges), + msg=f"Expects these edges: {set(expected_edges)}, instead got: {set(edges)}." + ) + + def test_get_neighbor_qubits(self): + """Tests various cases of obtaining (nearest) neighboring qubits.""" + qubits: List[IQubitID] = self.layer.get_neighbors(qubit=QubitIDObj('D5'), order=1) + expected_qubits: List[IQubitID] = [ + QubitIDObj('Z1'), QubitIDObj('X2'), QubitIDObj('X3'), QubitIDObj('Z4') + ] + self.assertSetEqual( + set(qubits), + set(expected_qubits), + msg=f"Expects these neighboring qubits: {set(expected_qubits)}, instead got: {set(qubits)}." + ) + # endregion + + # region Teardown + @classmethod + def tearDownClass(cls) -> None: + """Closes any left over processes after testing""" + pass + # endregion diff --git a/pycqed/utilities/general.py b/pycqed/utilities/general.py index 96a7c66502..af858fcd2b 100644 --- a/pycqed/utilities/general.py +++ b/pycqed/utilities/general.py @@ -885,3 +885,184 @@ def get_formatted_exception(): sstb = itb.stb2text(stb) return sstb + + +####################################### +# Flux lutman frequency to amp helpers +####################################### +def get_frequency_waveform(wave_par, flux_lutman): + ''' + Calculate detuning of waveform. 
+ ''' + poly_coefs = flux_lutman.q_polycoeffs_freq_01_det() + out_range = flux_lutman.cfg_awg_channel_range() + ch_amp = flux_lutman.cfg_awg_channel_amplitude() + dac_amp = flux_lutman.get(wave_par) + out_volt = dac_amp*ch_amp*out_range/2 + poly_func = np.poly1d(poly_coefs) + freq = poly_func(out_volt) + return freq + +def get_DAC_amp_frequency(freq, flux_lutman): + ''' + Function to calculate DAC amp corresponding + to frequency detuning. + ''' + poly_coefs = flux_lutman.q_polycoeffs_freq_01_det() + out_range = flux_lutman.cfg_awg_channel_range() + ch_amp = flux_lutman.cfg_awg_channel_amplitude() + poly_func = np.poly1d(poly_coefs) + out_volt = max((poly_func-freq).roots) + sq_amp = out_volt/(ch_amp*out_range/2) + # Safe check in case amplitude exceeds maximum + if sq_amp>1: + print(f'WARNING had to increase gain of {flux_lutman.name} to {ch_amp}!') + flux_lutman.cfg_awg_channel_amplitude(ch_amp*1.5) + # Can't believe Im actually using recursion!!! + sq_amp = get_DAC_amp_frequency(freq, flux_lutman) + return sq_amp + +def get_Ch_amp_frequency(freq, flux_lutman, DAC_param='sq_amp'): + ''' + Function to calculate channel gain corresponding + to frequency detuning. + ''' + poly_coefs = flux_lutman.q_polycoeffs_freq_01_det() + out_range = flux_lutman.cfg_awg_channel_range() + dac_amp = flux_lutman.get(DAC_param) + poly_func = np.poly1d(poly_coefs) + out_volt = max((poly_func-freq).roots) + ch_amp = out_volt/(dac_amp*out_range/2) + if isinstance(ch_amp, complex): + print('Warning: Complex amplitude estimated, setting it to zero.') + ch_amp = 0 + return ch_amp + + +#################################### +# Surface-17 utility functions +#################################### +def get_gate_directions(q0, q1, + map_qubits=None): + """ + Helper function to determine two-qubit gate directions. + q0 and q1 should be given as high-freq and low-freq qubit, respectively. + Default map is surface-17, however other maps are supported. 
+ """ + if map_qubits == None: + # Surface-17 layout + map_qubits = {'Z3' : [-2,-1], + 'D9' : [ 0, 2], + 'X4' : [-1, 2], + 'D8' : [-1, 1], + 'Z4' : [ 0, 1], + 'D6' : [ 1, 1], + 'D7' : [-2, 0], + 'X3' : [-1, 0], + 'D5' : [ 0, 0], + 'X2' : [ 1, 0], + 'D3' : [ 2, 0], + 'D4' : [-1,-1], + 'Z1' : [ 0,-1], + 'D2' : [ 1,-1], + 'X1' : [ 1,-2], + 'Z2' : [ 2, 1], + 'D1' : [ 0,-2] + } + V0 = np.array(map_qubits[q0]) + V1 = np.array(map_qubits[q1]) + diff = V1-V0 + dist = np.sqrt(np.sum((diff)**2)) + if dist > 1: + raise ValueError('Qubits are not nearest neighbors') + if diff[0] == 0.: + if diff[1] > 0: + return ('NE', 'SW') + else: + return ('SW', 'NE') + elif diff[1] == 0.: + if diff[0] > 0: + return ('SE', 'NW') + else: + return ('NW', 'SE') + +def get_nearest_neighbors(qubit, map_qubits=None): + """ + Helper function to determine nearest neighbors of a qubit. + Default map is surface-17, however other maps are supported. + """ + if map_qubits == None: + # Surface-17 layout + map_qubits = {'Z3' : [-2,-1], + 'D9' : [ 0, 2], + 'X4' : [-1, 2], + 'D8' : [-1, 1], + 'Z4' : [ 0, 1], + 'D6' : [ 1, 1], + 'D7' : [-2, 0], + 'X3' : [-1, 0], + 'D5' : [ 0, 0], + 'X2' : [ 1, 0], + 'D3' : [ 2, 0], + 'D4' : [-1,-1], + 'Z1' : [ 0,-1], + 'D2' : [ 1,-1], + 'X1' : [ 1,-2], + 'Z2' : [ 2, 1], + 'D1' : [ 0,-2] + } + Neighbor_dict = {} + Qubits = list(map_qubits.keys()) + Qubits.remove(qubit) + for q in Qubits: + V0 = np.array(map_qubits[qubit]) # qubit position + V1 = np.array(map_qubits[q]) + diff = V1-V0 + dist = np.sqrt(np.sum((diff)**2)) + if any(diff) == 0.: + pass + elif diff[0] == 0.: + if diff[1] == 1.: + Neighbor_dict[q] = 'SW' + elif diff[1] == -1.: + Neighbor_dict[q] = 'NE' + elif diff[1] == 0.: + if diff[0] == 1.: + Neighbor_dict[q] = 'NW' + elif diff[0] == -1.: + Neighbor_dict[q] = 'SE' + return Neighbor_dict + +def get_parking_qubits(qH, qL): + ''' + Get parked qubits during two-qubit gate + ''' + get_gate_directions(qH, qL) + # Get all neighbors of 2Q gate + qH_neighbors = 
get_nearest_neighbors(qH) + qL_neighbors = get_nearest_neighbors(qL) + all_neighbors = {**qH_neighbors, **qL_neighbors} + # remove qubits in 2QG + del all_neighbors[qH] + del all_neighbors[qL] + # remove high frequency qubits + if 'D4' in all_neighbors.keys(): + del all_neighbors['D4'] + if 'D5' in all_neighbors.keys(): + del all_neighbors['D5'] + if 'D6' in all_neighbors.keys(): + del all_neighbors['D6'] + _keys_to_remove = [] + # If high ferquency qubit is ancilla + if ('Z' in qH) or ('X' in qH): + for q in all_neighbors.keys(): + if ('Z' in q) or ('X' in q): + _keys_to_remove.append(q) + # If high frequency qubit is ancilla + else: + for q in all_neighbors.keys(): + if 'D' in q: + _keys_to_remove.append(q) + for q in _keys_to_remove: + del all_neighbors[q] + return list(all_neighbors.keys()) \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index eb8172a0a2..8df5ef608b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,6 +5,7 @@ # If you are installing pycqed on your computer run # `pip install qutip` before you install pycqed +qutip-qip qcodes numpy<=1.21 # Required for 3rd party packages that still rely on (deprecated) numpy dtype aliases. Such as np.float, np.complex, etc. (sklearn, ...) cython @@ -23,6 +24,7 @@ zhinst-utils; python_version > '3.6' packaging deprecated pytest +cvxpy # packages that used to have a minimum version constraint (only) adaptive # >=0.10.0 @@ -45,4 +47,6 @@ pytest # cmake # wheel # # Manually build from source. -# qutechopenql @ git+https://github.com/DiCarloLab-Delft/OpenQL@82a9881bdb2c2f2b0620c14c549c436f21d1607c # Build from commit \ No newline at end of file +# qutechopenql @ git+https://github.com/DiCarloLab-Delft/OpenQL@82a9881bdb2c2f2b0620c14c549c436f21d1607c # Build from commit +pymatching +git+https://github.com/DiCarloLab-Delft/QCoCircuits \ No newline at end of file